/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

/*
 * For testing I/O like fsx does, but deterministically and without a real
 * underlying file system
 */
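/*
 * The mock FUSE server stores the test file's data in a real temporary file
 * ("backing_file").  Every operation is also applied to an ordinary file
 * ("control") through regular syscalls, and the data read back from the two
 * files is compared.
 */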

using namespace testing;

const char FULLPATH[] = "mountpoint/some_file.txt";
const char RELPATH[] = "some_file.txt";
const uint64_t ino = 42;

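/*
 * Fail the current test if the data read from the test file differs from the
 * control file's data at the same offset.
 */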
static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
	ssize_t size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
			off_t ofs = baseofs + i;
			FAIL() << "miscompare at offset "
			       << std::hex
			       << std::showbase
			       << ofs
			       << ".  expected = "
			       << std::setw(2)
			       << (unsigned)((const uint8_t*)controlbuf)[i]
			       << " got = "
			       << (unsigned)((const uint8_t*)tbuf)[i];
		}
	}
}

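/*
 * Test parameters: <async read enabled, max write size, cache mode,
 * FUSE kernel minor version>
 */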
typedef tuple<bool, uint32_t, cache_mode, uint32_t> IoParam;

class Io: public FuseTest, public WithParamInterface<IoParam> {
public:
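/*
 * m_test_fd is the file under test, opened through the fusefs mount.
 * m_control_fd is an ordinary file that receives the same operations.
 * m_backing_fd is the file where the mock server stores the test file's data.
 */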
int m_backing_fd, m_control_fd, m_test_fd;
off_t m_filesize;
bool m_direct_io;

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

void SetUp()
{
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);
	m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_control_fd < 0)
		FAIL() << strerror(errno);
	srandom(22'9'1982);	// Seed with my birthday

	if (get<0>(GetParam()))
		m_init_flags |= FUSE_ASYNC_READ;
	m_maxwrite = get<1>(GetParam());
	switch (get<2>(GetParam())) {
		case Uncached:
			m_direct_io = true;
			break;
		case WritebackAsync:
			m_async = true;
			/* FALLTHROUGH */
		case Writeback:
			m_init_flags |= FUSE_WRITEBACK_CACHE;
			/* FALLTHROUGH */
		case Writethrough:
			break;
		default:
			FAIL() << "Unknown cache mode";
	}
	m_kernel_minor_version = get<3>(GetParam());
	m_noatime = true;	// To prevent SETATTR for atime on close

	FuseTest::SetUp();
	if (IsSkipped())
		return;

	if (verbosity > 0) {
		printf("Test Parameters: init_flags=%#x maxwrite=%#x "
		    "%sasync cache=%s kernel_minor_version=%d\n",
		    m_init_flags, m_maxwrite, m_async? "" : "no",
		    cache_mode_to_s(get<2>(GetParam())),
		    m_kernel_minor_version);
	}

	expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
	expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
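	/* Store FUSE_WRITE data in the backing file */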
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		const char *buf = (const char*)in.body.bytes +
			sizeof(struct fuse_write_in);
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;

		assert((size_t)isize <= sizeof(in.body.bytes) -
			sizeof(struct fuse_write_in));
		ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = isize;
	})));
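	/* Service FUSE_READ from the backing file */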
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
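		/*
		 * Note: fuse_read_in and fuse_write_in lay out fh, offset,
		 * and size identically, so in.body.write also yields the
		 * read request's offset and size.
		 */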
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;
		void *buf = out.body.bytes;
		ssize_t osize;

		assert((size_t)isize <= sizeof(out.body.bytes));
		osize = pread(m_backing_fd, buf, isize, iofs);
		ASSERT_LE(0, osize) << strerror(errno);
		out.header.len = sizeof(struct fuse_out_header) + osize;
	})));
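	/* Let FUSE_SETATTR size changes truncate or extend the backing file */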
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				(in.body.setattr.valid & FATTR_SIZE));
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | 0755;
		out.body.attr.attr.size = in.body.setattr.size;
		out.body.attr.attr_valid = UINT64_MAX;
	})));
	/* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
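	/* Implement FUSE_COPY_FILE_RANGE as a copy within the backing file */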
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_COPY_FILE_RANGE &&
				in.header.nodeid == ino &&
				in.body.copy_file_range.nodeid_out == ino &&
				in.body.copy_file_range.flags == 0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		off_t off_in = in.body.copy_file_range.off_in;
		off_t off_out = in.body.copy_file_range.off_out;
		ASSERT_EQ((ssize_t)in.body.copy_file_range.len,
		    copy_file_range(m_backing_fd, &off_in, m_backing_fd,
			    &off_out, in.body.copy_file_range.len, 0));
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = in.body.copy_file_range.len;
	})));
	/* Claim that we don't support FUSE_LSEEK */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_LSEEK);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));

	m_test_fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, m_test_fd) << strerror(errno);
}

void TearDown()
{
	if (m_test_fd >= 0)
		close(m_test_fd);
	if (m_backing_fd >= 0)
		close(m_backing_fd);
	if (m_control_fd >= 0)
		close(m_control_fd);
	FuseTest::TearDown();
	leak(m_test_fd);
}

void do_closeopen()
{
	ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
	m_test_fd = open("backing_file", O_RDWR);
	ASSERT_LE(0, m_test_fd) << strerror(errno);

	ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
	m_control_fd = open("control", O_RDWR);
	ASSERT_LE(0, m_control_fd) << strerror(errno);
}

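/*
 * Copy a range within the test file and apply the identical copy to the
 * control file.  copy_file_range(2) may copy fewer bytes than requested, so
 * loop until the whole range has been copied.
 */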
void do_copy_file_range(off_t off_in, off_t off_out, size_t size)
{
	ssize_t r;
	off_t test_off_in = off_in;
	off_t test_off_out = off_out;
	off_t test_size = size;
	off_t control_off_in = off_in;
	off_t control_off_out = off_out;
	off_t control_size = size;

	while (test_size > 0) {
		r = copy_file_range(m_test_fd, &test_off_in, m_test_fd,
				&test_off_out, test_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		test_size -= r;
	}
	while (control_size > 0) {
		r = copy_file_range(m_control_fd, &control_off_in, m_control_fd,
				&control_off_out, control_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		control_size -= r;
	}
	m_filesize = std::max(m_filesize, off_out + (off_t)size);
}

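/* Truncate the test file and the control file to the same length */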
void do_ftruncate(off_t offs)
{
	ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
	ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
	m_filesize = offs;
}

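/*
 * Read from the test file via mmap and compare against the control file.
 * The mapping must begin on a page boundary, so round the offset down and
 * map a little extra.
 */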
void do_mapread(off_t offs, ssize_t size)
{
	char *control_buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
	    offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	control_buf = new char[size];

	ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
		<< strerror(errno);

	compare((void*)((char*)p + pg_offset), control_buf, offs, size);

	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
	delete[] control_buf;
}

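/* Read from the test file with pread and compare against the control file */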
void do_read(off_t offs, ssize_t size)
{
	char *test_buf, *control_buf;
	ssize_t r;

	test_buf = new char[size];
	control_buf = new char[size];

	errno = 0;
	r = pread(m_test_fd, test_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";
	r = pread(m_control_fd, control_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";

	compare(test_buf, control_buf, offs, size);

	delete[] control_buf;
	delete[] test_buf;
}

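/*
 * Write random data to the test file through a shared mapping, mirroring the
 * same data to the control file with pwrite.
 */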
void do_mapwrite(off_t offs, ssize_t size)
{
	char *buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;
	long i;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	if (offs + size > m_filesize) {
		/*
		 * Must manually extend.  vm_mmap_vnode will not implicitly
		 * extend a vnode
		 */
		do_ftruncate(offs + size);
	}

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
	    MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	bcopy(buf, (char*)p + pg_offset, size);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);

	delete[] buf;
	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
}

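/* Write random data to both the test file and the control file with pwrite */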
void do_write(off_t offs, ssize_t size)
{
	char *buf;
	long i;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs))
		<< strerror(errno);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);
	m_filesize = std::max(m_filesize, offs + size);

	delete[] buf;
}

};

class IoCacheable: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

class IoCopyFileRange: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

/*
 * Extend a file with dirty data in the last page of the last block.
 *
 * fsx -WR -P /tmp -S8 -N3 fsx.bin
 */
TEST_P(Io, extend_from_dirty_page)
{
	off_t wofs = 0x21a0;
	ssize_t wsize = 0xf0a8;
	off_t rofs = 0xb284;
	ssize_t rsize = 0x9b22;
	off_t truncsize = 0x28702;

	do_write(wofs, wsize);
	do_ftruncate(truncsize);
	do_read(rofs, rsize);
}

/*
 * mapwrite into a newly extended part of a file.
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
 */
TEST_P(IoCacheable, extend_by_mapwrite)
{
	do_mapwrite(0x29a3a, 0x849e);	/* [0x29a3a, 0x31ed7] */
	do_mapwrite(0x3c7d8, 0x3994);	/* [0x3c7d8, 0x4016b] */
	do_read(0x30c16, 0xf556);	/* [0x30c16, 0x4016b] */
}

/*
 * When writing the last page of a file, it must be written synchronously.
 * Otherwise the cached page can be invalidated by a subsequent extend
 * operation.
 *
 * fsx -WR -P /tmp -S642 -N3 fsx.bin
 */
TEST_P(Io, last_page)
{
	do_write(0x1134f, 0xcc77);	/* [0x1134f, 0x1dfc5] */
	do_write(0x2096a, 0xdfa7);	/* [0x2096a, 0x2e910] */
	do_read(0x1a3aa, 0xb5b7);	/* [0x1a3aa, 0x25960] */
}

/*
 * Read a hole using mmap
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp  -S14 fsx.bin
 */
TEST_P(IoCacheable, mapread_hole)
{
	do_write(0xf205, 0x123b7);	/* [0xf205, 0x215bb] */
	do_mapread(0x2f4c, 0xeeea);	/* [0x2f4c, 0x11e35] */
}

/*
 * Read a hole from a block that contains some cached data.
 *
 * fsx -WR -P /tmp -S55  fsx.bin
 */
TEST_P(Io, read_hole_from_cached_block)
{
	off_t wofs = 0x160c5;
	ssize_t wsize = 0xa996;
	off_t rofs = 0x472e;
	ssize_t rsize = 0xd8d5;

	do_write(wofs, wsize);
	do_read(rofs, rsize);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed.
 *
 * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer)
{
	off_t wofs0 = 0x3bad7;
	ssize_t wsize0 = 0x4529;
	off_t wofs1 = 0xc30d;
	ssize_t wsize1 = 0x5f77;
	off_t truncsize0 = 0x10916;
	off_t rofs = 0xdf17;
	ssize_t rsize = 0x29ff;
	off_t truncsize1 = 0x152b4;

	do_write(wofs0, wsize0);
	do_write(wofs1, wsize1);
	do_ftruncate(truncsize0);
	do_read(rofs, rsize);
	do_ftruncate(truncsize1);
	close(m_test_fd);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed, even when the buffer's
 * dirty_off is > 0.
 *
 * Based on this command with a few steps removed:
 * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer2)
{
	off_t truncsize0 = 0x344f3;
	off_t wofs = 0x2790c;
	ssize_t wsize = 0xd86a;
	off_t truncsize1 = 0x2de38;
	off_t rofs2 = 0x1fd7a;
	ssize_t rsize2 = 0xc594;
	off_t truncsize2 = 0x31e71;

	/* Sets the file size to something larger than the next write */
	do_ftruncate(truncsize0);
	/*
	 * Creates a dirty buffer.  The part in lbn 2 doesn't flush
	 * synchronously.
	 */
	do_write(wofs, wsize);
	/* Truncates part of the dirty buffer created in step 2 */
	do_ftruncate(truncsize1);
	/* XXX I don't know why this read is necessary */
	do_read(rofs2, rsize2);
	/* Truncates the dirty buffer */
	do_ftruncate(truncsize2);
	close(m_test_fd);
}

/*
 * Regression test for a bug introduced in r348931
 *
 * Sequence of operations:
 * 1) The first write reads the lbn so it can modify it
 * 2) The first write flushes lbn 3 immediately because it's the end of file
 * 3) The first write then flushes lbn 4 because it's the end of the file
 * 4) The second write modifies the cached versions of lbn 3 and 4
 * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
 *    extending the buffer.  Then it flushes lbn 4 because B_DELWRI was set but
 *    B_CACHE was clear.
 * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
 *    buffer into a weird write-only state.  All read operations would return
 *    0.  Writes were apparently still processed, because the buffer's contents
 *    were correct when examined in a core dump.
 * 7) The third write reads lbn 4 because cache is clear
 * 8) uiomove dutifully copies new data into the buffer
 * 9) The buffer's dirty data is flushed to lbn 4
 * 10) The read returns all zeros because of step 6.
 *
 * Based on:
 * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q  fsx.bin
 */
TEST_P(Io, resize_a_valid_buffer_while_extending)
{
	do_write(0x36ee6, 0x14530);	/* [0x36ee6, 0x4b415] */
	do_write(0x33256, 0x1507c);	/* [0x33256, 0x482d1] */
	do_write(0x4c03d, 0x175c);	/* [0x4c03d, 0x4d798] */
	do_read(0x3599c, 0xe277);	/* [0x3599c, 0x43c12] */
	close(m_test_fd);
}

/*
 * mmap of a suitable region could trigger a panic.  I'm not sure what
 * combination of size and offset counts as "suitable".  Regression test for
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=276191
 */
TEST_P(IoCacheable, vnode_pager_generic_putpage_clean_block_at_eof)
{
	do_mapwrite(0x3b4e0, 0x1bbc3);
}

/*
 * A copy_file_range that follows an mmap write to the input area needs to
 * flush the mmap buffer first.
 */
TEST_P(IoCopyFileRange, copy_file_range_from_mapped_write)
{
	do_mapwrite(0, 0x1000);
	do_copy_file_range(0, 0x1000, 0x1000);
	do_read(0x1000, 0x1000);
}

INSTANTIATE_TEST_SUITE_P(Io, Io,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Uncached, Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCacheable,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCopyFileRange,
	Combine(Values(true),				/* async read */
		Values(0x10000),			/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(27, 28)				/* kernel_minor_vers */
	)
);
609