/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/types.h>

#include <fcntl.h>
#include <pthread.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

/*
 * FUSE asynchronous notification
 *
 * FUSE servers can send unprompted notification messages for things like cache
 * invalidation.  This file tests our client's handling of those messages.
 */
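
/*
 * For context, a rough sketch of what such a message looks like on the wire
 * (not used by these tests, which go through MockFS::notify_inval_entry(),
 * notify_inval_inode(), and notify_store() instead): the server writes an
 * ordinary fuse_out_header whose unique field is 0 and whose error field
 * carries the notification code, followed by the type-specific body.
 * For instance, invalidating a dentry might look roughly like the following,
 * where parent_ino and name are placeholders:
 *
 *	struct fuse_out_header oh;
 *	struct fuse_notify_inval_entry_out nie;
 *
 *	oh.unique = 0;			// 0 marks an unprompted notification
 *	oh.error = FUSE_NOTIFY_INVAL_ENTRY;	// notification code
 *	nie.parent = parent_ino;	// directory containing the entry
 *	nie.namelen = strlen(name);	// the name follows the struct
 *	oh.len = sizeof(oh) + sizeof(nie) + nie.namelen;
 *	// then write oh, nie, and the name to the FUSE device in one write()
 */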

class Notify: public FuseTest {
public:
	/* Ignore an optional FUSE_FSYNC */
	void maybe_expect_fsync(uint64_t ino)
	{
		EXPECT_CALL(*m_mock, process(
			ResultOf([=](auto in) {
				return (in.header.opcode == FUSE_FSYNC &&
					in.header.nodeid == ino);
			}, Eq(true)),
			_)
		).WillOnce(Invoke(ReturnErrno(0)));
	}

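	/*
	 * Expect a FUSE_LOOKUP of relpath under parent, returning a regular
	 * file with the given inode number and size, cached forever.
	 */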
	void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
		off_t size, Sequence &seq)
	{
		EXPECT_LOOKUP(parent, relpath)
		.InSequence(seq)
		.WillOnce(Invoke(
			ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, entry);
			out.body.entry.attr.mode = S_IFREG | 0644;
			out.body.entry.nodeid = ino;
			out.body.entry.attr.ino = ino;
			out.body.entry.attr.nlink = 1;
			out.body.entry.attr.size = size;
			out.body.entry.attr_valid = UINT64_MAX;
			out.body.entry.entry_valid = UINT64_MAX;
		})));
	}
};

class NotifyWriteback: public Notify {
public:
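	/*
	 * Mount with the writeback cache enabled, so writes are buffered in
	 * the kernel and must be flushed before an invalidation completes.
	 */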
	virtual void SetUp() {
		m_init_flags |= FUSE_WRITEBACK_CACHE;
		m_async = true;
		Notify::SetUp();
		if (IsSkipped())
			return;
	}

	void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
		const void *contents)
	{
		FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
	}

};

struct inval_entry_args {
	MockFS *mock;
	ino_t parent;
	const char *name;
	size_t namelen;
};

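/*
 * Send FUSE_NOTIFY_INVAL_ENTRY from a separate thread.  The thread's exit
 * value is 0 on success or the error number on failure.
 */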
static void* inval_entry(void* arg) {
	const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
	ssize_t r;

	r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

struct inval_inode_args {
	MockFS *mock;
	ino_t ino;
	off_t off;
	ssize_t len;
};

struct store_args {
	MockFS *mock;
	ino_t nodeid;
	off_t offset;
	ssize_t size;
	const void* data;
};

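/*
 * Send FUSE_NOTIFY_INVAL_INODE from a separate thread.  The thread's exit
 * value is 0 on success or the error number on failure.
 */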
static void* inval_inode(void* arg) {
	const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
	ssize_t r;

	r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

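/*
 * Send FUSE_NOTIFY_STORE from a separate thread.  The thread's exit value is
 * 0 on success or the error number on failure.
 */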
static void* store(void* arg) {
	const struct store_args *sa = (struct store_args*)arg;
	ssize_t r;

	r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

/* Invalidate a nonexistent entry */
TEST_F(Notify, inval_entry_nonexistent)
{
	const static char *name = "foo";
	struct inval_entry_args iea;
	void *thr0_value;
	pthread_t th0;

	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = name;
	iea.namelen = strlen(name);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an entry to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Invalidate a cached entry */
TEST_F(Notify, inval_entry)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
	expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/*
 * Invalidate a cached entry beneath the root, which uses a slightly different
 * code path.
 */
TEST_F(Notify, inval_entry_below_root)
{
	const static char FULLPATH[] = "mountpoint/some_dir/foo";
	const static char DNAME[] = "some_dir";
	const static char FNAME[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t dir_ino = 41;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFDIR | 0755;
		out.body.entry.nodeid = dir_ino;
		out.body.entry.attr.nlink = 2;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
	expect_lookup(dir_ino, FNAME, ino0, 0, seq);
	expect_lookup(dir_ino, FNAME, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = dir_ino;
	iea.name = FNAME;
	iea.namelen = strlen(FNAME);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/* Invalidating an entry invalidates the parent directory's attributes */
TEST_F(Notify, inval_entry_invalidates_parent_attrs)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino = 42;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == FUSE_ROOT_ID);
		}, Eq(true)),
		_)
	).Times(2)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFDIR | 0755;
		out.body.attr.attr_valid = UINT64_MAX;
	})));

	/* Fill the attr and entry cache */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The root directory's attribute cache should be cleared */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
}


TEST_F(Notify, inval_inode_nonexistent)
{
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an inode to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

TEST_F(Notify, inval_inode_with_clean_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct inval_inode_args iia;
	struct stat sb;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	uid_t uid = 12345;
	pthread_t th0;
	ssize_t size0 = sizeof(CONTENTS0);
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = size1;
		out.body.attr.attr.uid = uid;
	})));
	expect_read(ino, 0, size0, size0, CONTENTS0);
	expect_read(ino, 0, size1, size1, CONTENTS1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(size1, sb.st_size);

	/* This read should not be serviced by the cache */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

/* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_nonexistent)
{
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for a file to be unknown to the kernel */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Store data for a file that does not yet have anything cached */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_with_blank_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
	expect_open(ino, 0, 1);

	/* Open the file, but leave the data cache empty */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Store data directly into the cache */
	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = size1;
	sa.data = (const void*)CONTENTS1;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* This read should be serviced by the cache */
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	expect_write(ino, 0, bufsize, CONTENTS);
	/*
	 * The FUSE protocol does not require an fsync here, but FreeBSD's
	 * bufobj_invalbuf sends it anyway.
	 */
	maybe_expect_fsync(ino);

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	leak(fd);
}

TEST_F(NotifyWriteback, inval_inode_attrs_only)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	struct stat sb;
	uid_t uid = 12345;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr.uid = uid;
	})));

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Evict the attributes, but not the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = -1;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(bufsize, sb.st_size);

	leak(fd);
}