/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/types.h>
#include <sys/mman.h>

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
}

#include <iomanip>

#include "mockfs.hh"
#include "utils.hh"

/*
 * For testing I/O like fsx does, but deterministically and without a real
 * underlying file system
 */

using namespace testing;

const char FULLPATH[] = "mountpoint/some_file.txt";
const char RELPATH[] = "some_file.txt";
const uint64_t ino = 42;

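/*
 * Compare a range of the test file against the control file, reporting the
 * first mismatching byte and its offset.
 */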
static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
	ssize_t size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
			off_t ofs = baseofs + i;
			FAIL() << "miscompare at offset "
			       << std::hex
			       << std::showbase
			       << ofs
			       << ". expected = "
			       << std::setw(2)
			       << (unsigned)((const uint8_t*)controlbuf)[i]
			       << " got = "
			       << (unsigned)((const uint8_t*)tbuf)[i];
		}
	}
}

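/* Test parameters: async read?, maxwrite, cache mode, kernel minor version */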
typedef tuple<bool, uint32_t, cache_mode, uint32_t> IoParam;

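/*
 * Run I/O operations against a file in the mock FUSE file system, mirror
 * every operation to a "control" file on the real file system, and compare
 * the results.
 */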
class Io: public FuseTest, public WithParamInterface<IoParam> {
public:
int m_backing_fd, m_control_fd, m_test_fd;
off_t m_filesize;
bool m_direct_io;

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

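/*
 * Create the backing and control files, then configure the mock FUSE server
 * so that reads, writes, truncations, and copy_file_range requests for the
 * test file are applied to the backing file.
 */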
void SetUp()
{
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);
	m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_control_fd < 0)
		FAIL() << strerror(errno);
	srandom(22'9'1982);	// Seed with my birthday

	if (get<0>(GetParam()))
		m_init_flags |= FUSE_ASYNC_READ;
	m_maxwrite = get<1>(GetParam());
	switch (get<2>(GetParam())) {
		case Uncached:
			m_direct_io = true;
			break;
		case WritebackAsync:
			m_async = true;
			/* FALLTHROUGH */
		case Writeback:
			m_init_flags |= FUSE_WRITEBACK_CACHE;
			/* FALLTHROUGH */
		case Writethrough:
			break;
		default:
			FAIL() << "Unknown cache mode";
	}
	m_kernel_minor_version = get<3>(GetParam());
	m_noatime = true;	// To prevent SETATTR for atime on close

	FuseTest::SetUp();
	if (IsSkipped())
		return;

	if (verbosity > 0) {
		printf("Test Parameters: init_flags=%#x maxwrite=%#x "
		    "%sasync cache=%s kernel_minor_version=%d\n",
		    m_init_flags, m_maxwrite, m_async ? "" : "no",
		    cache_mode_to_s(get<2>(GetParam())),
		    m_kernel_minor_version);
	}

	expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
	expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
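	/* Proxy FUSE_WRITE requests to the backing file with pwrite(2) */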
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		const char *buf = (const char*)in.body.bytes +
			sizeof(struct fuse_write_in);
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;

		assert((size_t)isize <= sizeof(in.body.bytes) -
			sizeof(struct fuse_write_in));
		ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = isize;
	})));
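	/* Serve FUSE_READ requests from the backing file with pread(2) */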
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;
		void *buf = out.body.bytes;
		ssize_t osize;

		assert((size_t)isize <= sizeof(out.body.bytes));
		osize = pread(m_backing_fd, buf, isize, iofs);
		ASSERT_LE(0, osize) << strerror(errno);
		out.header.len = sizeof(struct fuse_out_header) + osize;
	})));
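	/* Apply FUSE_SETATTR truncations to the backing file */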
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				(in.body.setattr.valid & FATTR_SIZE));
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | 0755;
		out.body.attr.attr.size = in.body.setattr.size;
		out.body.attr.attr_valid = UINT64_MAX;
	})));
	/* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
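	/* Proxy FUSE_COPY_FILE_RANGE to copy_file_range(2) on the backing file */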
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_COPY_FILE_RANGE &&
				in.header.nodeid == ino &&
				in.body.copy_file_range.nodeid_out == ino &&
				in.body.copy_file_range.flags == 0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		off_t off_in = in.body.copy_file_range.off_in;
		off_t off_out = in.body.copy_file_range.off_out;
		ASSERT_EQ((ssize_t)in.body.copy_file_range.len,
		    copy_file_range(m_backing_fd, &off_in, m_backing_fd,
			&off_out, in.body.copy_file_range.len, 0));
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = in.body.copy_file_range.len;
	})));
	/* Claim that we don't support FUSE_LSEEK */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_LSEEK);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));

	m_test_fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, m_test_fd) << strerror(errno);
}

void TearDown()
{
	if (m_test_fd >= 0)
		close(m_test_fd);
	if (m_backing_fd >= 0)
		close(m_backing_fd);
	if (m_control_fd >= 0)
		close(m_control_fd);
	FuseTest::TearDown();
	leak(m_test_fd);
}

void do_closeopen()
{
	ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
	m_test_fd = open("backing_file", O_RDWR);
	ASSERT_LE(0, m_test_fd) << strerror(errno);

	ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
	m_control_fd = open("control", O_RDWR);
	ASSERT_LE(0, m_control_fd) << strerror(errno);
}

void do_copy_file_range(off_t off_in, off_t off_out, size_t size)
{
	ssize_t r;
	off_t test_off_in = off_in;
	off_t test_off_out = off_out;
	off_t test_size = size;
	off_t control_off_in = off_in;
	off_t control_off_out = off_out;
	off_t control_size = size;

	while (test_size > 0) {
		r = copy_file_range(m_test_fd, &test_off_in, m_test_fd,
		    &test_off_out, test_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		test_size -= r;
	}
	while (control_size > 0) {
		r = copy_file_range(m_control_fd, &control_off_in,
		    m_control_fd, &control_off_out, control_size, 0);
		ASSERT_GT(r, 0) << strerror(errno);
		control_size -= r;
	}
	m_filesize = std::max(m_filesize, off_out + (off_t)size);
}

void do_ftruncate(off_t offs)
{
	ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
	ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
	m_filesize = offs;
}

void do_mapread(off_t offs, ssize_t size)
{
	char *control_buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;

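	/* mmap() needs a page-aligned offset, so map from the start of the page */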
	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
	    offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	control_buf = new char[size];

	ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
		<< strerror(errno);

	compare((void*)((char*)p + pg_offset), control_buf, offs, size);

	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
	delete[] control_buf;
}

void do_read(off_t offs, ssize_t size)
{
	char *test_buf, *control_buf;
	ssize_t r;

	test_buf = new char[size];
	control_buf = new char[size];

	errno = 0;
	r = pread(m_test_fd, test_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";
	r = pread(m_control_fd, control_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";

	compare(test_buf, control_buf, offs, size);

	delete[] control_buf;
	delete[] test_buf;
}

void do_mapwrite(off_t offs, ssize_t size)
{
	char *buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;
	long i;

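	/* As in do_mapread, mmap() needs a page-aligned offset */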
	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	if (offs + size > m_filesize) {
		/*
		 * Must manually extend.  vm_mmap_vnode will not implicitly
		 * extend a vnode
		 */
		do_ftruncate(offs + size);
	}

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
	    MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	bcopy(buf, (char*)p + pg_offset, size);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);

	delete[] buf;
	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
}

void do_write(off_t offs, ssize_t size)
{
	char *buf;
	long i;

	buf = new char[size];
	for (i = 0; i < size; i++)
		buf[i] = random();

	ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs))
		<< strerror(errno);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);
	m_filesize = std::max(m_filesize, offs + size);

	delete[] buf;
}

};

class IoCacheable: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

class IoCopyFileRange: public Io {
public:
virtual void SetUp() {
	Io::SetUp();
}
};

/*
 * Extend a file with dirty data in the last page of the last block.
 *
 * fsx -WR -P /tmp -S8 -N3 fsx.bin
 */
TEST_P(Io, extend_from_dirty_page)
{
	off_t wofs = 0x21a0;
	ssize_t wsize = 0xf0a8;
	off_t rofs = 0xb284;
	ssize_t rsize = 0x9b22;
	off_t truncsize = 0x28702;

	do_write(wofs, wsize);
	do_ftruncate(truncsize);
	do_read(rofs, rsize);
}

/*
 * mapwrite into a newly extended part of a file.
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
 */
TEST_P(IoCacheable, extend_by_mapwrite)
{
	do_mapwrite(0x29a3a, 0x849e);	/* [0x29a3a, 0x31ed7] */
	do_mapwrite(0x3c7d8, 0x3994);	/* [0x3c7d8, 0x4016b] */
	do_read(0x30c16, 0xf556);	/* [0x30c16, 0x4016b] */
}

/*
 * When writing the last page of a file, it must be written synchronously.
 * Otherwise the cached page can be invalidated by a subsequent extend
 * operation.
 *
 * fsx -WR -P /tmp -S642 -N3 fsx.bin
 */
TEST_P(Io, last_page)
{
	do_write(0x1134f, 0xcc77);	/* [0x1134f, 0x1dfc5] */
	do_write(0x2096a, 0xdfa7);	/* [0x2096a, 0x2e910] */
	do_read(0x1a3aa, 0xb5b7);	/* [0x1a3aa, 0x25960] */
}

/*
 * Read a hole using mmap
 *
 * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp -S14 fsx.bin
 */
TEST_P(IoCacheable, mapread_hole)
{
	do_write(0xf205, 0x123b7);	/* [0xf205, 0x215bb] */
	do_mapread(0x2f4c, 0xeeea);	/* [0x2f4c, 0x11e35] */
}

/*
 * Read a hole from a block that contains some cached data.
 *
 * fsx -WR -P /tmp -S55 fsx.bin
 */
TEST_P(Io, read_hole_from_cached_block)
{
	off_t wofs = 0x160c5;
	ssize_t wsize = 0xa996;
	off_t rofs = 0x472e;
	ssize_t rsize = 0xd8d5;

	do_write(wofs, wsize);
	do_read(rofs, rsize);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed.
 *
 * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer)
{
	off_t wofs0 = 0x3bad7;
	ssize_t wsize0 = 0x4529;
	off_t wofs1 = 0xc30d;
	ssize_t wsize1 = 0x5f77;
	off_t truncsize0 = 0x10916;
	off_t rofs = 0xdf17;
	ssize_t rsize = 0x29ff;
	off_t truncsize1 = 0x152b4;

	do_write(wofs0, wsize0);
	do_write(wofs1, wsize1);
	do_ftruncate(truncsize0);
	do_read(rofs, rsize);
	do_ftruncate(truncsize1);
	close(m_test_fd);
}

/*
 * Truncating a file into a dirty buffer should not cause anything untoward
 * to happen when that buffer is eventually flushed, even when the buffer's
 * dirty_off is > 0.
 *
 * Based on this command with a few steps removed:
 * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
 */
TEST_P(Io, truncate_into_dirty_buffer2)
{
	off_t truncsize0 = 0x344f3;
	off_t wofs = 0x2790c;
	ssize_t wsize = 0xd86a;
	off_t truncsize1 = 0x2de38;
	off_t rofs2 = 0x1fd7a;
	ssize_t rsize2 = 0xc594;
	off_t truncsize2 = 0x31e71;

	/* Sets the file size to something larger than the next write */
	do_ftruncate(truncsize0);
	/*
	 * Creates a dirty buffer.  The part in lbn 2 doesn't flush
	 * synchronously.
	 */
	do_write(wofs, wsize);
	/* Truncates part of the dirty buffer created in step 2 */
	do_ftruncate(truncsize1);
	/* XXX I don't know why this read is necessary */
	do_read(rofs2, rsize2);
	/* Truncates the dirty buffer */
	do_ftruncate(truncsize2);
	close(m_test_fd);
}

/*
 * Regression test for a bug introduced in r348931
 *
 * Sequence of operations:
 * 1) The first write reads the lbn so it can modify it
 * 2) The first write flushes lbn 3 immediately because it's the end of file
 * 3) The first write then flushes lbn 4 because it's the end of the file
 * 4) The second write modifies the cached versions of lbn 3 and 4
 * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
 *    extending the buffer.  Then it flushes lbn 4 because B_DELWRI was set
 *    but B_CACHE was clear.
 * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
 *    buffer into a weird write-only state.  All read operations would return
 *    0.  Writes were apparently still processed, because the buffer's
 *    contents were correct when examined in a core dump.
 * 7) The third write reads lbn 4 because B_CACHE is clear
 * 8) uiomove dutifully copies new data into the buffer
 * 9) The buffer's dirty data is flushed to lbn 4
 * 10) The read returns all zeros because of step 6.
 *
 * Based on:
 * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q fsx.bin
 */
TEST_P(Io, resize_a_valid_buffer_while_extending)
{
	do_write(0x36ee6, 0x14530);	/* [0x36ee6, 0x4b415] */
	do_write(0x33256, 0x1507c);	/* [0x33256, 0x482d1] */
	do_write(0x4c03d, 0x175c);	/* [0x4c03d, 0x4d798] */
	do_read(0x3599c, 0xe277);	/* [0x3599c, 0x43c12] */
	close(m_test_fd);
}

/*
 * mmap of a suitable region could trigger a panic.  I'm not sure what
 * combination of size and offset counts as "suitable".  Regression test for
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=276191
 */
TEST_P(IoCacheable, vnode_pager_generic_putpage_clean_block_at_eof)
{
	do_mapwrite(0x3b4e0, 0x1bbc3);
}

/*
 * A copy_file_range that follows an mmap write to the input area needs to
 * flush the mmap buffer first.
 */
TEST_P(IoCopyFileRange, copy_file_range_from_mapped_write)
{
	do_mapwrite(0, 0x1000);
	do_copy_file_range(0, 0x1000, 0x1000);
	do_read(0x1000, 0x1000);
}

INSTANTIATE_TEST_SUITE_P(Io, Io,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Uncached, Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCacheable,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(28)				/* kernel_minor_vers */
	)
);

INSTANTIATE_TEST_SUITE_P(Io, IoCopyFileRange,
	Combine(Values(true),				/* async read */
		Values(0x10000),			/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync),
		Values(27, 28)				/* kernel_minor_vers */
	)
);