/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

extern "C" {
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

class Write: public FuseTest {

public:
static sig_atomic_t s_sigxfsz;

void SetUp() {
	s_sigxfsz = 0;
	FuseTest::SetUp();
}

void TearDown() {
	struct sigaction sa;

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGXFSZ, &sa, NULL);

	FuseTest::TearDown();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}

void expect_release(uint64_t ino, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(r));
}

void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
}

/* Expect a write that may or may not come, depending on the cache mode */
void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			const char *buf = (const char*)in.body.bytes +
				sizeof(struct fuse_write_in);

			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.offset == offset &&
				in.body.write.size == size &&
				0 == bcmp(buf, contents, size));
		}, Eq(true)),
		_)
	).Times(AtMost(1))
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, write);
			out.body.write.size = size;
		})
	));
}

};

sig_atomic_t Write::s_sigxfsz = 0;
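
/*
 * Tests that use FUSE protocol version 7.8.  Protocol versions before 7.9
 * send the older, smaller layout of fuse_write_in, so these tests use the
 * expect_write_7_8 helper instead of expect_write.
 */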
class Write_7_8: public FuseTest {

public:
virtual void SetUp() {
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}

};

class AioWrite: public Write {
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	FuseTest::SetUp();
}
};

/* Tests for the writeback cache mode */
class WriteBack: public Write {
public:
virtual void SetUp() {
	m_init_flags |= FUSE_WRITEBACK_CACHE;
	FuseTest::SetUp();
	if (IsSkipped())
		return;
}

void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
		contents);
}
};

class WriteBackAsync: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	WriteBack::SetUp();
}
};

class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	WriteBackAsync::SetUp();
}
};

/* Tests for clustered writes with WriteBack caching */
class WriteCluster: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	m_maxwrite = 1 << 25;	// Anything larger than MAXPHYS will suffice
	WriteBack::SetUp();
	if (m_maxphys < 2 * DFLTPHYS)
		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
			<< " for this test";
	if (m_maxphys < 2 * m_maxbcachebuf)
		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
			<< " for this test";
}
};
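
/*
 * Handler for SIGXFSZ.  It only records that the signal was delivered;
 * Write.rlimit_fsize checks Write::s_sigxfsz to verify that exceeding
 * RLIMIT_FSIZE raised the signal.
 */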
void sigxfsz_handler(int __unused sig) {
	Write::s_sigxfsz = 1;
}

/* AIO writes need to set the header's pid field correctly */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioWrite, DISABLED_aio_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	uint64_t offset = 4096;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = __DECONST(void *, CONTENTS);
	iocb.aio_offset = offset;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	leak(fd);
}

/*
 * When a file is opened with O_APPEND, we should forward that flag to
 * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
 * offset internally.  That way we'll work both with filesystems that
 * understand O_APPEND (and ignore the offset) and filesystems that don't (and
 * simply use the offset).
 *
 * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
 * Open.o_append test.
 */
TEST_F(Write, append)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
	 * using writeback caching
	 */
	uint64_t initial_offset = m_maxbcachebuf;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, 0, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}

/* If a file is cached, then appending to the end should not cause a read */
TEST_F(Write, append_to_cached)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	char *oldcontents, *oldbuf;
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set the old file size in between maxbcachebuf boundaries to test
	 * buffer handling
	 */
	uint64_t oldsize = m_maxbcachebuf / 2;
	int fd;

	oldcontents = (char*)calloc(1, oldsize);
	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
	oldbuf = (char*)malloc(oldsize);
	ASSERT_NE(nullptr, oldbuf) << strerror(errno);

	expect_lookup(RELPATH, ino, oldsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, oldsize, oldsize, oldcontents);
	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Read the old data into the cache */
	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
		<< strerror(errno);

	/* Write the new data.  There should be no more read operations */
	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
	free(oldbuf);
	free(oldcontents);
}
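
/* Like Write.append, but for a file the server opened with FOPEN_DIRECT_IO */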
TEST_F(Write, append_direct_io)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	uint64_t initial_offset = 4096;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	fd = open(FULLPATH, O_WRONLY | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}

/* A direct write should evict any overlapping cached data */
TEST_F(Write, direct_io_evicts_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	// Prime cache
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);

	// Write directly, evicting cache
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);

	// Read again.  Cache should be bypassed
	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	ASSERT_STREQ(readbuf, CONTENTS1);

	leak(fd);
}

/*
 * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
 * allowed to return a short write for that file handle.  However, if it does
 * then we should still do our darndest to handle it by resending the unwritten
 * portion.
 */
TEST_F(Write, indirect_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t bufsize0 = 11;
	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
	const char *contents1 = CONTENTS + bufsize0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
/*
 * When the direct_io option is used, filesystems are allowed to write less
 * data than requested.  We should return the short write to userland.
 */
TEST_F(Write, direct_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * An insidious edge case: the filesystem returns a short write, and the
 * difference between what we requested and what it actually wrote crosses an
 * iov element boundary
 */
TEST_F(Write, direct_io_short_write_iov)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	const char *EXPECTED0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t size0 = strlen(CONTENTS0) - 1;
	ssize_t size1 = strlen(CONTENTS1) + 1;
	ssize_t totalsize = size0 + size1;
	struct iovec iov[2];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, 0, totalsize, size0, EXPECTED0);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	iov[0].iov_base = __DECONST(void*, CONTENTS0);
	iov[0].iov_len = strlen(CONTENTS0);
	iov[1].iov_base = __DECONST(void*, CONTENTS1);
	iov[1].iov_len = strlen(CONTENTS1);
	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
	leak(fd);
}

/* fusefs should respect RLIMIT_FSIZE */
TEST_F(Write, rlimit_fsize)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	off_t offset = 1'000'000'000;
	uint64_t ino = 42;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	rl.rlim_cur = offset;
	rl.rlim_max = 10 * offset;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
	EXPECT_EQ(EFBIG, errno);
	EXPECT_EQ(1, s_sigxfsz);
	leak(fd);
}
/*
 * A short read indicates EOF.  Test that nothing bad happens if we get EOF
 * during the R of a RMW operation.
 */
TEST_F(Write, eof_during_rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	ssize_t bufsize = strlen(CONTENTS);
	off_t orig_fsize = 10;
	off_t truncated_fsize = 5;
	off_t final_fsize = bufsize;
	int fd;

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
	expect_getattr(ino, truncated_fsize);
	expect_read(ino, 0, final_fsize, final_fsize, INITIAL, O_RDWR);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/*
 * If the kernel cannot be sure which uid, gid, or pid was responsible for a
 * write, then it must set the FUSE_WRITE_CACHE bit
 */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
TEST_F(Write, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	void *p;
	uint64_t offset = 10;
	size_t len;
	void *zeros, *expected;

	len = getpagesize();

	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);
	expected = calloc(1, len);
	ASSERT_NE(nullptr, expected);
	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, len, len, zeros);
	/*
	 * Writes from the pager may or may not be associated with the correct
	 * pid, so they must set FUSE_WRITE_CACHE.
	 */
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	memmove((uint8_t*)p + offset, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(expected);
	free(zeros);

	leak(fd);
}

TEST_F(Write, pwrite)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	uint64_t offset = m_maxbcachebuf;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/* Writing a file should update its cached mtime and ctime */
TEST_F(Write, timestamps)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	struct stat sb0, sb1;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	maybe_expect_write(ino, 0, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	nap();

	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);

	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);

	leak(fd);
}

TEST_F(Write, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/* fuse(4) should not issue writes of greater size than the daemon requests */
TEST_F(Write, write_large)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	int *contents;
	uint64_t ino = 42;
	int fd;
	ssize_t halfbufsize, bufsize;

	halfbufsize = m_mock->m_maxwrite;
	bufsize = halfbufsize * 2;
	contents = (int*)malloc(bufsize);
	ASSERT_NE(nullptr, contents);
	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
		contents[i] = i;
	}

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	maybe_expect_write(ino, 0, halfbufsize, contents);
	maybe_expect_write(ino, halfbufsize, halfbufsize,
		&contents[halfbufsize / sizeof(int)]);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
	leak(fd);

	free(contents);
}

TEST_F(Write, write_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

TEST_F(Write_7_8, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/* In writeback mode, dirty data should be written on close */
TEST_F(WriteBackAsync, close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	close(fd);
}

/* In writeback mode, adjacent writes will be clustered together */
TEST_F(WriteCluster, clustering)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf, *wbuf2x;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 5 * bufsize;

	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);
	wbuf2x = malloc(2 * bufsize);
	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
	memset(wbuf2x, 'X', 2 * bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	/*
	 * Writes of bufsize-bytes each should be clustered into greater sizes.
	 * The amount of clustering is adaptive, so the first write actually
	 * issued will be 2x bufsize and subsequent writes may be larger
	 */
	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	for (i = 0; i < 4; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	close(fd);
	free(wbuf2x);
	free(wbuf);
}

/*
 * When clustering writes, an I/O error to any of the cluster's children should
 * not panic the system on unmount
 */
/*
 * Disabled because it panics.
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
 */
TEST_F(WriteCluster, DISABLED_cluster_write_err)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 4 * bufsize;

	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	for (i = 0; i < 3; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	close(fd);
	free(wbuf);
}

/*
 * In writeback mode, writes to an O_WRONLY file could trigger reads from the
 * server.  The FUSE protocol explicitly allows that.
 */
TEST_F(WriteBack, rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	off_t fsize = 10;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/*
 * Without direct_io, writes should be committed to cache
 */
TEST_F(WriteBack, cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * With O_DIRECT, writes should not be committed to cache.  Admittedly this is
 * an odd test, because it would be unusual to use O_DIRECT for writes but not
 * reads.
 */
TEST_F(WriteBack, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR | O_DIRECT);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * When mounted with -o async, the writeback cache mode should delay writes
 */
TEST_F(WriteBackAsync, delay)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Don't close the file because that would flush the cache */
	leak(fd);
}

/*
 * A direct write should not evict dirty cached data from outside of its own
 * byte range.
 */
TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	ssize_t fsize = 2 * m_maxbcachebuf;
	char readbuf[bufsize];
	void *zeros;

	zeros = calloc(1, m_maxbcachebuf);
	ASSERT_NE(nullptr, zeros);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
		CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	// Cache first block with dirty data.  This will entail first reading
	// the existing data.
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
		<< strerror(errno);

	// Write directly to second block
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
		<< strerror(errno);

	// Read from the first block again.  Should be serviced by cache.
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
	ASSERT_STREQ(readbuf, CONTENTS0);

	leak(fd);
	free(zeros);
}

/*
 * If a direct io write partially overlaps one or two blocks of dirty cached
 * data, no dirty data should be lost.  Admittedly this is a weird test,
 * because it would be unusual to use O_DIRECT and the writeback cache.
 */
TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	off_t bs = m_maxbcachebuf;
	ssize_t fsize = 3 * bs;
	void *readbuf, *zeros, *ones, *zeroones, *onezeros;

	readbuf = malloc(bs);
	ASSERT_NE(nullptr, readbuf) << strerror(errno);
	zeros = calloc(1, 3 * bs);
	ASSERT_NE(nullptr, zeros);
	ones = calloc(1, 2 * bs);
	ASSERT_NE(nullptr, ones);
	memset(ones, 1, 2 * bs);
	zeroones = calloc(1, bs);
	ASSERT_NE(nullptr, zeroones);
	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
	onezeros = calloc(1, bs);
	ASSERT_NE(nullptr, onezeros);
	memset(onezeros, 1, bs / 2);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Cache first and third blocks with dirty data. */
	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);

	/*
	 * Write directly to all three blocks.  The partially written blocks
	 * will be flushed because they're dirty.
	 */
	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
	/* The direct write is split in two because of the m_maxwrite value */
	FuseTest::expect_write(ino, bs / 2, bs, bs, 0, 0, ones);
	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);

	/*
	 * Read from both the valid and invalid portions of the first and third
	 * blocks again.  This will entail FUSE_READ operations because these
	 * blocks were invalidated by the direct write.
	 */
	expect_read(ino, 0, bs, bs, zeroones);
	expect_read(ino, 2 * bs, bs, bs, onezeros);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));

	leak(fd);
	free(zeroones);
	free(onezeros);
	free(ones);
	free(zeros);
	free(readbuf);
}

/*
 * In WriteBack mode, writes may be cached beyond what the server thinks is the
 * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
 * the file's size.
 */
TEST_F(WriteBackAsync, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	off_t offset = m_maxbcachebuf;
	ssize_t wbufsize = strlen(CONTENTS1);
	off_t old_filesize = (off_t)strlen(CONTENTS0);
	ssize_t rbufsize = 2 * old_filesize;
	char readbuf[rbufsize];
	size_t holesize = rbufsize - old_filesize;
	char hole[holesize];
	struct stat sb;
	ssize_t r;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	/* Write and cache data beyond EOF */
	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
		<< strerror(errno);

	/* Read from the old EOF */
	r = pread(fd, readbuf, rbufsize, 0);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
	bzero(hole, holesize);
	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));

	/* The file's size should still be what was established by pwrite */
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ(offset + wbufsize, sb.st_size);
	leak(fd);
}

/*
 * When a file has dirty writes that haven't been flushed, the server's notion
 * of its mtime and ctime will be wrong.  The kernel should ignore those if it
 * gets them from a FUSE_GETATTR before flushing.
 */
TEST_F(WriteBackAsync, timestamps)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	uint64_t attr_valid = 0;
	uint64_t attr_valid_nsec = 0;
	uint64_t server_time = 12345;
	mode_t mode = S_IFREG | 0644;
	int fd;

	struct stat sb;

	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, entry);
			out.body.entry.attr.mode = mode;
			out.body.entry.nodeid = ino;
			out.body.entry.attr.nlink = 1;
			out.body.entry.attr_valid = attr_valid;
			out.body.entry.attr_valid_nsec = attr_valid_nsec;
		})));
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(
		ReturnImmediate([=](auto i __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, attr);
			out.body.attr.attr.ino = ino;
			out.body.attr.attr.mode = mode;
			out.body.attr.attr_valid = attr_valid;
			out.body.attr.attr_valid_nsec = attr_valid_nsec;
			out.body.attr.attr.atime = server_time;
			out.body.attr.attr.mtime = server_time;
			out.body.attr.attr.ctime = server_time;
		})));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ((time_t)server_time, sb.st_atime);
	EXPECT_NE((time_t)server_time, sb.st_mtime);
	EXPECT_NE((time_t)server_time, sb.st_ctime);

	leak(fd);
}

/* Any dirty timestamp fields should be flushed during a SETATTR */
TEST_F(WriteBackAsync, timestamps_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

/* fuse_init_out.time_gran controls the granularity of timestamps */
TEST_P(TimeGran, timestamps_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				in.body.setattr.mtimensec % m_time_gran == 0 &&
				in.body.setattr.ctimensec % m_time_gran == 0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));

/*
 * Without direct_io, writes should be committed to cache
 */
TEST_F(Write, writethrough)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/* Writes that extend a file should update the cached file size */
TEST_F(Write, update_file_size)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* Get cached attributes */
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	ASSERT_EQ(bufsize, sb.st_size);
	leak(fd);
}