/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/types.h>
#include <sys/sysctl.h>

#include <fcntl.h>
#include <pthread.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

/*
 * FUSE asynchronous notification
 *
 * FUSE servers can send unprompted notification messages for things like cache
 * invalidation.  This file tests our client's handling of those messages.
 */

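/*
 * For reference, a server-initiated ("reverse") notification is just a write
 * to the FUSE device containing a fuse_out_header whose unique field is zero
 * and whose error field holds the notification code, followed by the
 * notification-specific payload.  A rough sketch of what a helper such as
 * MockFS::notify_inval_entry presumably assembles (not the actual
 * implementation):
 *
 *	struct fuse_out_header oh;
 *	struct fuse_notify_inval_entry_out nie;
 *
 *	oh.unique = 0;
 *	oh.error = FUSE_NOTIFY_INVAL_ENTRY;
 *	oh.len = sizeof(oh) + sizeof(nie) + namelen + 1;
 *	nie.parent = parent;
 *	nie.namelen = namelen;
 *	// write oh, nie, and the NUL-terminated name to the device in a
 *	// single writev(2)
 */
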
class Notify: public FuseTest {
public:
/* Ignore an optional FUSE_FSYNC */
void maybe_expect_fsync(uint64_t ino)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FSYNC &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(0)));
}

void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
	off_t size, Sequence &seq)
{
	EXPECT_LOOKUP(parent, relpath)
	.InSequence(seq)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFREG | 0644;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.ino = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr.size = size;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
}
};

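/*
 * Tests in this fixture enable FUSE_WRITEBACK_CACHE, so writes complete into
 * the kernel's cache and reach the server asynchronously.  That is why
 * invalidating an inode with dirty data is expected to trigger a FUSE_WRITE
 * (and possibly a FUSE_FSYNC) before the cached data is discarded.
 */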
class NotifyWriteback: public Notify {
public:
virtual void SetUp() {
	m_init_flags |= FUSE_WRITEBACK_CACHE;
	m_async = true;
	Notify::SetUp();
	if (IsSkipped())
		return;
}

void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
}

};

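/*
 * Helpers for issuing reverse notifications from a separate thread.  Each one
 * returns 0 on success or the errno value (cast through void*) on failure, so
 * a test can collect the result with pthread_join.  A separate thread is
 * used, presumably, because the notification can block inside the kernel
 * while buffers are flushed or invalidated, which may itself require the mock
 * server to service operations such as FUSE_WRITE or FUSE_FSYNC.
 */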
struct inval_entry_args {
	MockFS		*mock;
	ino_t		parent;
	const char	*name;
	size_t		namelen;
};

static void* inval_entry(void* arg) {
	const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
	ssize_t r;

	r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

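/*
 * Arguments for MockFS::notify_inval_inode.  Judging by the tests below, the
 * cached attributes are always purged, while off and len select the range of
 * cached file data to drop as well: off = 0, len = 0 drops the entire data
 * cache, and a negative off leaves the data cache alone.
 */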
struct inval_inode_args {
	MockFS		*mock;
	ino_t		ino;
	off_t		off;
	ssize_t		len;
};

struct store_args {
	MockFS		*mock;
	ino_t		nodeid;
	off_t		offset;
	ssize_t		size;
	const void*	data;
};

static void* inval_inode(void* arg) {
	const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
	ssize_t r;

	r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

static void* store(void* arg) {
	const struct store_args *sa = (struct store_args*)arg;
	ssize_t r;

	r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

/* Invalidate a nonexistent entry */
TEST_F(Notify, inval_entry_nonexistent)
{
	const static char *name = "foo";
	struct inval_entry_args iea;
	void *thr0_value;
	pthread_t th0;

	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = name;
	iea.namelen = strlen(name);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an entry to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Invalidate a cached entry */
TEST_F(Notify, inval_entry)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
	expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/*
 * Invalidate a cached entry beneath the root, which uses a slightly different
 * code path.
 */
TEST_F(Notify, inval_entry_below_root)
{
	const static char FULLPATH[] = "mountpoint/some_dir/foo";
	const static char DNAME[] = "some_dir";
	const static char FNAME[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t dir_ino = 41;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFDIR | 0755;
		out.body.entry.nodeid = dir_ino;
		out.body.entry.attr.nlink = 2;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
	expect_lookup(dir_ino, FNAME, ino0, 0, seq);
	expect_lookup(dir_ino, FNAME, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = dir_ino;
	iea.name = FNAME;
	iea.namelen = strlen(FNAME);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/* Invalidating an entry invalidates the parent directory's attributes */
TEST_F(Notify, inval_entry_invalidates_parent_attrs)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino = 42;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == FUSE_ROOT_ID);
		}, Eq(true)),
		_)
	).Times(2)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFDIR | 0755;
		out.body.attr.attr_valid = UINT64_MAX;
	})));

	/* Fill the attr and entry cache */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* /'s attribute cache should be cleared */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
}

TEST_F(Notify, inval_inode_nonexistent)
{
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an inode to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

TEST_F(Notify, inval_inode_with_clean_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct inval_inode_args iia;
	struct stat sb;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	uid_t uid = 12345;
	pthread_t th0;
	ssize_t size0 = sizeof(CONTENTS0);
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = size1;
		out.body.attr.attr.uid = uid;
	})));
	expect_read(ino, 0, size0, size0, CONTENTS0);
	expect_read(ino, 0, size1, size1, CONTENTS1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* Cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(size1, sb.st_size);

	/* This read should not be serviced by the cache */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

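/*
 * FUSE_NOTIFY_STORE lets a server push file data directly into the kernel's
 * cache without waiting for a read: roughly, a fuse_notify_store_out
 * { nodeid, offset, size } payload followed by size bytes of data.  The
 * FreeBSD client does not implement it yet, hence the disabled tests below.
 */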
/* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_nonexistent)
{
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for a file to be unknown to the kernel */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Store data into a file that does not yet have anything cached */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_with_blank_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
	expect_open(ino, 0, 1);

	/* Open the file, but leave the data cache blank */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Store data directly into the cache */
	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = size1;
	sa.data = (const void*)CONTENTS1;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* This read should be serviced by the cache */
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	expect_write(ino, 0, bufsize, CONTENTS);
	/*
	 * The FUSE protocol does not require an fsync here, but FreeBSD's
	 * bufobj_invalbuf sends it anyway.
	 */
	maybe_expect_fsync(ino);

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	leak(fd);
}

TEST_F(NotifyWriteback, inval_inode_attrs_only)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	struct stat sb;
	uid_t uid = 12345;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr.uid = uid;
	})));

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Evict the attributes, but not the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = -1;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* Cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(bufsize, sb.st_size);

	leak(fd);
}
548