/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

extern "C" {
#include <sys/types.h>
#include <sys/sysctl.h>

#include <fcntl.h>
#include <pthread.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

/*
 * FUSE asynchronous notification
 *
 * FUSE servers can send unprompted notification messages for things like cache
 * invalidation.  This file tests our client's handling of those messages.
 */
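
/*
 * The notifications exercised below are FUSE_NOTIFY_INVAL_ENTRY,
 * FUSE_NOTIFY_INVAL_INODE, and FUSE_NOTIFY_STORE (whose tests are currently
 * disabled because the client does not yet implement it).
 */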

class Notify: public FuseTest {
public:
/* Ignore an optional FUSE_FSYNC */
void maybe_expect_fsync(uint64_t ino)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FSYNC &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(0)));
}

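/*
 * Expect a FUSE_LOOKUP of relpath under parent and return a regular-file
 * entry with the given inode number and size.  attr_valid and entry_valid
 * are UINT64_MAX, so the kernel caches the entry until it is explicitly
 * invalidated.
 */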
void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
	off_t size, Sequence &seq)
{
	EXPECT_LOOKUP(parent, relpath)
	.InSequence(seq)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFREG | 0644;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.ino = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr.size = size;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
}
};

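/*
 * Tests that need a dirty cache.  FUSE_WRITEBACK_CACHE lets the kernel cache
 * writes and flush them to the server later.
 */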
class NotifyWriteback: public Notify {
public:
virtual void SetUp() {
	m_init_flags |= FUSE_WRITEBACK_CACHE;
	m_async = true;
	Notify::SetUp();
	if (IsSkipped())
		return;
}

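/* Expect exactly one FUSE_WRITE of size bytes at offset, matching contents */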
void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
}

};

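/*
 * Each notification helper below runs in its own thread and returns 0 on
 * success or the errno from a failed notification.  These structs carry the
 * thread arguments.
 */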
struct inval_entry_args {
	MockFS		*mock;
	ino_t		parent;
	const char	*name;
	size_t		namelen;
};

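/* Send FUSE_NOTIFY_INVAL_ENTRY for the named entry in the parent directory */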
static void* inval_entry(void* arg) {
	const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
	ssize_t r;

	r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

struct inval_inode_args {
	MockFS		*mock;
	ino_t		ino;
	off_t		off;
	ssize_t		len;
};

struct store_args {
	MockFS		*mock;
	ino_t		nodeid;
	off_t		offset;
	ssize_t		size;
	const void*	data;
};

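/* Send FUSE_NOTIFY_INVAL_INODE for the given inode and range */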
static void* inval_inode(void* arg) {
	const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
	ssize_t r;

	r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

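/* Send FUSE_NOTIFY_STORE to push data into the given inode's cache */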
static void* store(void* arg) {
	const struct store_args *sa = (struct store_args*)arg;
	ssize_t r;

	r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
	if (r >= 0)
		return 0;
	else
		return (void*)(intptr_t)errno;
}

/* Invalidate a nonexistent entry */
TEST_F(Notify, inval_entry_nonexistent)
{
	const static char *name = "foo";
	struct inval_entry_args iea;
	void *thr0_value;
	pthread_t th0;

	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = name;
	iea.namelen = strlen(name);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an entry to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Invalidate a cached entry */
TEST_F(Notify, inval_entry)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
	expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/*
 * Invalidate a cached entry beneath the root, which uses a slightly different
 * code path.
 */
TEST_F(Notify, inval_entry_below_root)
{
	const static char FULLPATH[] = "mountpoint/some_dir/foo";
	const static char DNAME[] = "some_dir";
	const static char FNAME[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t dir_ino = 41;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFDIR | 0755;
		out.body.entry.nodeid = dir_ino;
		out.body.entry.attr.nlink = 2;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
	expect_lookup(dir_ino, FNAME, ino0, 0, seq);
	expect_lookup(dir_ino, FNAME, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = dir_ino;
	iea.name = FNAME;
	iea.namelen = strlen(FNAME);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}

/* Invalidating an entry invalidates the parent directory's attributes */
TEST_F(Notify, inval_entry_invalidates_parent_attrs)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino = 42;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == FUSE_ROOT_ID);
		}, Eq(true)),
		_)
	).Times(2)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFDIR | 0755;
		out.body.attr.attr_valid = UINT64_MAX;
	})));

	/* Fill the attr and entry cache */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* /'s attribute cache should be cleared */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
}


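/* Invalidate an inode that the kernel hasn't cached */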
TEST_F(Notify, inval_inode_nonexistent)
{
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for an inode to not be cached */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

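/* Invalidating an inode should purge its cached attributes and clean data */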
TEST_F(Notify, inval_inode_with_clean_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct inval_inode_args iia;
	struct stat sb;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	uid_t uid = 12345;
	pthread_t th0;
	ssize_t size0 = sizeof(CONTENTS0);
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = size1;
		out.body.attr.attr.uid = uid;
	})));
	expect_read(ino, 0, size0, size0, CONTENTS0);
	expect_read(ino, 0, size1, size1, CONTENTS1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* Cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(size1, sb.st_size);

	/* This read should not be serviced by cache */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

/* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_nonexistent)
{
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	pthread_t th0;

	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	/* It's not an error for a file to be unknown to the kernel */
	EXPECT_EQ(0, (intptr_t)thr0_value);
}

/* Store data for a file that does not yet have anything cached */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_F(Notify, DISABLED_store_with_blank_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
	expect_open(ino, 0, 1);

	/* Open the file, but don't cache any data */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Push data directly into the cache */
	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = size1;
	sa.data = (const void*)CONTENTS1;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* This read should be serviced by cache */
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}

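/* Invalidating an inode with dirty data should write that data back first */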
TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	expect_write(ino, 0, bufsize, CONTENTS);
	/*
	 * The FUSE protocol does not require an fsync here, but FreeBSD's
	 * bufobj_invalbuf sends it anyway
	 */
	maybe_expect_fsync(ino);

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	leak(fd);
}

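/*
 * An invalidation with off == -1 should purge only the cached attributes;
 * dirty data should be neither flushed nor evicted.
 */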
TEST_F(NotifyWriteback, inval_inode_attrs_only)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	struct stat sb;
	uid_t uid = 12345;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr.uid = uid;
	})));

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Evict the attributes, but not data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = -1;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* Cached attributes have been purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(bufsize, sb.st_size);

	leak(fd);
}