/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
}

#include <iomanip>

#include "mockfs.hh"
#include "utils.hh"

/*
 * For testing I/O like fsx does, but deterministically and without a real
 * underlying file system.  Each test performs the same operations on a file
 * in the fusefs mount (served from a backing file by the mock FUSE server)
 * and on an ordinary control file, then compares the two results.
 */
using namespace testing;

const char FULLPATH[] = "mountpoint/some_file.txt";
const char RELPATH[] = "some_file.txt";
const uint64_t ino = 42;

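/* Compare two buffers and report the first mismatched byte, if any */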
static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
	ssize_t size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
			off_t ofs = baseofs + i;
			FAIL() << "miscompare at offset "
			       << std::hex
			       << std::showbase
			       << ofs
			       << ".  expected = "
			       << std::setw(2)
			       << (unsigned)((const uint8_t*)controlbuf)[i]
			       << " got = "
			       << (unsigned)((const uint8_t*)tbuf)[i];
		}
	}
}

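/*
 * Test parameters: <async read enabled, max write size, cache mode,
 * kernel minor version>, consumed by Io::SetUp below.
 */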
typedef tuple<bool, uint32_t, cache_mode, uint32_t> IoParam;

class Io: public FuseTest, public WithParamInterface<IoParam> {
public:
int m_backing_fd, m_control_fd, m_test_fd;
off_t m_filesize;
bool m_direct_io;

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

void SetUp()
{
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);
	m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_control_fd < 0)
		FAIL() << strerror(errno);
	srandom(22'9'1982);	// Seed with my birthday

	if (get<0>(GetParam()))
		m_init_flags |= FUSE_ASYNC_READ;
	m_maxwrite = get<1>(GetParam());
	switch (get<2>(GetParam())) {
		case Uncached:
			m_direct_io = true;
			break;
		case WritebackAsync:
			m_async = true;
			/* FALLTHROUGH */
		case Writeback:
			m_init_flags |= FUSE_WRITEBACK_CACHE;
			/* FALLTHROUGH */
		case Writethrough:
			break;
		default:
			FAIL() << "Unknown cache mode";
	}
	m_kernel_minor_version = get<3>(GetParam());
	m_noatime = true;	// To prevent SETATTR for atime on close

	FuseTest::SetUp();
	if (IsSkipped())
		return;

	if (verbosity > 0) {
		printf("Test Parameters: init_flags=%#x maxwrite=%#x "
		    "%sasync cache=%s kernel_minor_version=%d\n",
		    m_init_flags, m_maxwrite, m_async ? "" : "no",
		    cache_mode_to_s(get<2>(GetParam())),
		    m_kernel_minor_version);
	}

	expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
	expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
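	/* Pass FUSE_WRITE requests through to the backing file */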
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		const char *buf = (const char*)in.body.bytes +
			sizeof(struct fuse_write_in);
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;

		assert((size_t)isize <= sizeof(in.body.bytes) -
			sizeof(struct fuse_write_in));
		ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = isize;
	})));
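	/* Serve FUSE_READ requests from the backing file */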
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ssize_t isize = in.body.read.size;
		off_t iofs = in.body.read.offset;
		void *buf = out.body.bytes;
		ssize_t osize;

		assert((size_t)isize <= sizeof(out.body.bytes));
		osize = pread(m_backing_fd, buf, isize, iofs);
		ASSERT_LE(0, osize) << strerror(errno);
		out.header.len = sizeof(struct fuse_out_header) + osize;
	})));
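	/* Handle FUSE_SETATTR size changes by resizing the backing file */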
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				(in.body.setattr.valid & FATTR_SIZE));
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | 0755;
		out.body.attr.attr.size = in.body.setattr.size;
		out.body.attr.attr_valid = UINT64_MAX;
	})));
	/* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
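	/*
	 * Implement FUSE_COPY_FILE_RANGE by calling copy_file_range on the
	 * backing file.
	 */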
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_COPY_FILE_RANGE &&
				in.header.nodeid == ino &&
				in.body.copy_file_range.nodeid_out == ino &&
				in.body.copy_file_range.flags == 0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		off_t off_in = in.body.copy_file_range.off_in;
		off_t off_out = in.body.copy_file_range.off_out;
		ASSERT_EQ((ssize_t)in.body.copy_file_range.len,
		    copy_file_range(m_backing_fd, &off_in, m_backing_fd,
			    &off_out, in.body.copy_file_range.len, 0));
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = in.body.copy_file_range.len;
	})));
	/* Claim that we don't support FUSE_LSEEK */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_LSEEK);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));

	m_test_fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, m_test_fd) << strerror(errno);
}

void TearDown()
{
	if (m_test_fd >= 0)
		close(m_test_fd);
	if (m_backing_fd >= 0)
		close(m_backing_fd);
	if (m_control_fd >= 0)
		close(m_control_fd);
	FuseTest::TearDown();
	leak(m_test_fd);
}

void do_closeopen()
{
	ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
	m_test_fd = open("backing_file", O_RDWR);
	ASSERT_LE(0, m_test_fd) << strerror(errno);

	ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
	m_control_fd = open("control", O_RDWR);
	ASSERT_LE(0, m_control_fd) << strerror(errno);
}

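/*
 * Copy a range within the file, both through the fusefs mount and on the
 * control file, and update the expected file size.
 */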
void do_copy_file_range(off_t off_in, off_t off_out, size_t size)
{
	ssize_t r;
	off_t test_off_in = off_in;
	off_t test_off_out = off_out;
	off_t test_size = size;
	off_t control_off_in = off_in;
	off_t control_off_out = off_out;
	off_t control_size = size;

	while (test_size > 0) {
		r = copy_file_range(m_test_fd, &test_off_in, m_test_fd,
				&test_off_out, test_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		test_size -= r;
	}
	while (control_size > 0) {
		r = copy_file_range(m_control_fd, &control_off_in, m_control_fd,
				&control_off_out, control_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		control_size -= r;
	}
	m_filesize = std::max(m_filesize, off_out + (off_t)size);
}

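/* Truncate or extend both the test file and the control file */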
void do_ftruncate(off_t offs)
{
	ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
	ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
	m_filesize = offs;
}

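/* Read a range from the test file via mmap and compare with the control file */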
void do_mapread(off_t offs, ssize_t size)
{
	char *control_buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
	    offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	control_buf = new char[size];

	ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
		<< strerror(errno);

	compare((void*)((char*)p + pg_offset), control_buf, offs, size);

	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
	delete[] control_buf;
}

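/* pread the same range from both files and compare the results */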
void do_read(off_t offs, ssize_t size)
{
	char *test_buf, *control_buf;
	ssize_t r;

	test_buf = new char[size];
	control_buf = new char[size];

	errno = 0;
	r = pread(m_test_fd, test_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";
	r = pread(m_control_fd, control_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";

	compare(test_buf, control_buf, offs, size);

	delete[] control_buf;
	delete[] test_buf;
}

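/*
 * Write random data to the test file via mmap and to the control file with
 * pwrite, extending the file first if the mapping would reach past EOF.
 */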
void do_mapwrite(off_t offs, ssize_t size)
{
	char *buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;
	long i;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	if (offs + size > m_filesize) {
		/*
		 * Must manually extend.  vm_mmap_vnode will not implicitly
		 * extend a vnode
		 */
		do_ftruncate(offs + size);
	}

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
	    MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	bcopy(buf, (char*)p + pg_offset, size);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);

	delete[] buf;
	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
}

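/* pwrite the same random data to both files and update the expected file size */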
void do_write(off_t offs, ssize_t size)
{
	char *buf;
	long i;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs))
		<< strerror(errno);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);
	m_filesize = std::max(m_filesize, offs + size);

	delete[] buf;
}

};

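/* Tests that rely on caching; not instantiated with the Uncached mode */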
class IoCacheable: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

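/* Tests that use copy_file_range; run against protocol minor versions 27 and 28 */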
class IoCopyFileRange: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

/*
 * Extend a file with dirty data in the last page of the last block.
 *
 * fsx -WR -P /tmp -S8 -N3 fsx.bin
 */
TEST_P(Io, extend_from_dirty_page)
{
	off_t wofs = 0x21a0;
	ssize_t wsize = 0xf0a8;
	off_t rofs = 0xb284;
	ssize_t rsize = 0x9b22;
	off_t truncsize = 0x28702;

	do_write(wofs, wsize);
	do_ftruncate(truncsize);
	do_read(rofs, rsize);
}

/*
 * mapwrite into a newly extended part of a file.
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
 */
TEST_P(IoCacheable, extend_by_mapwrite)
{
	do_mapwrite(0x29a3a, 0x849e);	/* [0x29a3a, 0x31ed7] */
	do_mapwrite(0x3c7d8, 0x3994);	/* [0x3c7d8, 0x4016b] */
	do_read(0x30c16, 0xf556);	/* [0x30c16, 0x4016b] */
}

/*
 * When writing the last page of a file, it must be written synchronously.
 * Otherwise the cached page can be invalidated by a subsequent extend
 * operation.
 *
 * fsx -WR -P /tmp -S642 -N3 fsx.bin
 */
TEST_P(Io, last_page)
{
	do_write(0x1134f, 0xcc77);	/* [0x1134f, 0x1dfc5] */
	do_write(0x2096a, 0xdfa7);	/* [0x2096a, 0x2e910] */
	do_read(0x1a3aa, 0xb5b7);	/* [0x1a3aa, 0x25960] */
}

/*
 * Read a hole using mmap
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp -S14 fsx.bin
 */
TEST_P(IoCacheable, mapread_hole)
{
	do_write(0xf205, 0x123b7);	/* [0xf205, 0x215bb] */
	do_mapread(0x2f4c, 0xeeea);	/* [0x2f4c, 0x11e35] */
}

/*
 * Read a hole from a block that contains some cached data.
 *
 * fsx -WR -P /tmp -S55 fsx.bin
 */
TEST_P(Io, read_hole_from_cached_block)
{
	off_t wofs = 0x160c5;
	ssize_t wsize = 0xa996;
	off_t rofs = 0x472e;
	ssize_t rsize = 0xd8d5;

	do_write(wofs, wsize);
	do_read(rofs, rsize);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed.
 *
 * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer)
{
	off_t wofs0 = 0x3bad7;
	ssize_t wsize0 = 0x4529;
	off_t wofs1 = 0xc30d;
	ssize_t wsize1 = 0x5f77;
	off_t truncsize0 = 0x10916;
	off_t rofs = 0xdf17;
	ssize_t rsize = 0x29ff;
	off_t truncsize1 = 0x152b4;

	do_write(wofs0, wsize0);
	do_write(wofs1, wsize1);
	do_ftruncate(truncsize0);
	do_read(rofs, rsize);
	do_ftruncate(truncsize1);
	close(m_test_fd);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed, even when the buffer's
 * dirty_off is > 0.
 *
 * Based on this command with a few steps removed:
 * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer2)
{
	off_t truncsize0 = 0x344f3;
	off_t wofs = 0x2790c;
	ssize_t wsize = 0xd86a;
	off_t truncsize1 = 0x2de38;
	off_t rofs2 = 0x1fd7a;
	ssize_t rsize2 = 0xc594;
	off_t truncsize2 = 0x31e71;

	/* Sets the file size to something larger than the next write */
	do_ftruncate(truncsize0);
	/*
	 * Creates a dirty buffer.  The part in lbn 2 doesn't flush
	 * synchronously.
	 */
	do_write(wofs, wsize);
	/* Truncates part of the dirty buffer created in step 2 */
	do_ftruncate(truncsize1);
	/* XXX I don't know why this step is necessary */
	do_read(rofs2, rsize2);
	/* Truncates the dirty buffer */
	do_ftruncate(truncsize2);
	close(m_test_fd);
}

/*
 * Regression test for a bug introduced in r348931
 *
 * Sequence of operations:
 * 1) The first write reads lbn so it can modify it
 * 2) The first write flushes lbn 3 immediately because it's the end of file
 * 3) The first write then flushes lbn 4 because it's the end of the file
 * 4) The second write modifies the cached versions of lbn 3 and 4
 * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
 *    extending the buffer.  Then it flushes lbn 4 because B_DELWRI was set but
 *    B_CACHE was clear.
 * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
 *    buffer into a weird write-only state.  All read operations would return
 *    0.  Writes were apparently still processed, because the buffer's contents
 *    were correct when examined in a core dump.
 * 7) The third write reads lbn 4 because cache is clear
 * 8) uiomove dutifully copies new data into the buffer
 * 9) The buffer's dirty data is flushed to lbn 4
 * 10) The read returns all zeros because of step 6.
 *
 * Based on:
 * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q fsx.bin
 */
TEST_P(Io, resize_a_valid_buffer_while_extending)
{
	do_write(0x36ee6, 0x14530);	/* [0x36ee6, 0x4b415] */
	do_write(0x33256, 0x1507c);	/* [0x33256, 0x482d1] */
	do_write(0x4c03d, 0x175c);	/* [0x4c03d, 0x4d798] */
	do_read(0x3599c, 0xe277);	/* [0x3599c, 0x43c12] */
	close(m_test_fd);
}

/*
 * mmap of a suitable region could trigger a panic.  I'm not sure what
 * combination of size and offset counts as "suitable".  Regression test for
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=276191
 */
TEST_P(IoCacheable, vnode_pager_generic_putpage_clean_block_at_eof)
{
	do_mapwrite(0x3b4e0, 0x1bbc3);
}

/*
 * A copy_file_range that follows an mmap write to the input area needs to
 * flush the mmap buffer first.
 */
TEST_P(IoCopyFileRange, copy_file_range_from_mapped_write)
{
	do_mapwrite(0, 0x1000);
	do_copy_file_range(0, 0x1000, 0x1000);
	do_read(0x1000, 0x1000);
}

INSTANTIATE_TEST_SUITE_P(Io, Io,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Uncached, Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCacheable,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCopyFileRange,
	Combine(Values(true),				/* async read */
		Values(0x10000),			/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(27, 28)				/* kernel_minor_vers */
	)
);