xref: /linux/fs/cachefiles/cache.c (revision b690490d6d466972ade172ee2e7f6ffa49e7e910)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Manage high-level VFS aspects of a cache.
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Bring a cache online.
 */
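/*
 * Note: this is normally reached from cachefiles_daemon_bind() when the
 * userspace cache manager writes "bind" to /dev/cachefiles.  As a rough,
 * illustrative example (the path and percentages here are made up), the
 * daemon will have configured the cache with a command sequence along the
 * lines of:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	frun 10%
 *	fcull 7%
 *	fstop 3%
 *	bind
 *
 * which sets cache->rootdirname, cache->tag and the *_percent limits used
 * below before this function is called.
 */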
int cachefiles_add_cache(struct cachefiles_cache *cache)
{
	struct fscache_cache *cache_cookie;
	struct path path;
	struct kstatfs stats;
	struct dentry *graveyard, *cachedir, *root;
	const struct cred *saved_cred;
	int ret;

	_enter("");

	cache_cookie = fscache_acquire_cache(cache->tag);
	if (IS_ERR(cache_cookie))
		return PTR_ERR(cache_cookie);

	/* we want to work under the module's security ID */
	ret = cachefiles_get_security_ID(cache);
	if (ret < 0)
		goto error_getsec;

	cachefiles_begin_secure(cache, &saved_cred);

	/* look up the directory at the root of the cache */
	ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
	if (ret < 0)
		goto error_open_root;

	cache->mnt = path.mnt;
	root = path.dentry;

	ret = -EINVAL;
	if (is_idmapped_mnt(path.mnt)) {
		pr_warn("File cache on idmapped mounts not supported\n");
		goto error_unsupported;
	}

	/* check parameters */
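	/* The backing filesystem must support lookup and mkdir (to manage the
	 * cache tree), xattrs (used to store cachefiles' own metadata), statfs
	 * and sync_fs (for space checks and syncing), and its block size must
	 * not exceed PAGE_SIZE. */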
	ret = -EOPNOTSUPP;
	if (d_is_negative(root) ||
	    !d_backing_inode(root)->i_op->lookup ||
	    !d_backing_inode(root)->i_op->mkdir ||
	    !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
	    !root->d_sb->s_op->statfs ||
	    !root->d_sb->s_op->sync_fs ||
	    root->d_sb->s_blocksize > PAGE_SIZE)
		goto error_unsupported;

	ret = -EROFS;
	if (sb_rdonly(root->d_sb))
		goto error_unsupported;

	/* determine the security of the on-disk cache as this governs the
	 * security ID of the files we create */
	ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
	if (ret < 0)
		goto error_unsupported;

	/* get the cache size and blocksize */
	ret = vfs_statfs(&path, &stats);
	if (ret < 0)
		goto error_unsupported;

	ret = -ERANGE;
	if (stats.f_bsize <= 0)
		goto error_unsupported;

	ret = -EOPNOTSUPP;
	if (stats.f_bsize > PAGE_SIZE)
		goto error_unsupported;

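	/* bshift is the shift needed to convert a count of backing-fs blocks
	 * into a count of PAGE_SIZE blocks; sizes over PAGE_SIZE were rejected
	 * above, so it's only non-zero for blocksizes smaller than a page. */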
	cache->bsize = stats.f_bsize;
	cache->bshift = 0;
	if (stats.f_bsize < PAGE_SIZE)
		cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize);

	_debug("blksize %u (shift %u)",
	       cache->bsize, cache->bshift);

	_debug("size %llu, avail %llu",
	       (unsigned long long) stats.f_blocks,
	       (unsigned long long) stats.f_bavail);

	/* set up caching limits */
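	/* The limits are percentages of the backing filesystem's total file
	 * (inode) and block counts: below the stop limit further caching is
	 * refused, below the cull limit culling is started, and once the run
	 * limit is reached again an active cull may cease (see
	 * cachefiles_has_space() below). */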
	do_div(stats.f_files, 100);
	cache->fstop = stats.f_files * cache->fstop_percent;
	cache->fcull = stats.f_files * cache->fcull_percent;
	cache->frun  = stats.f_files * cache->frun_percent;

	_debug("limits {%llu,%llu,%llu} files",
	       (unsigned long long) cache->frun,
	       (unsigned long long) cache->fcull,
	       (unsigned long long) cache->fstop);

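	/* The block limits mirror the file limits, but are expressed in
	 * PAGE_SIZE-sized blocks, hence the conversion by bshift first. */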
	stats.f_blocks >>= cache->bshift;
	do_div(stats.f_blocks, 100);
	cache->bstop = stats.f_blocks * cache->bstop_percent;
	cache->bcull = stats.f_blocks * cache->bcull_percent;
	cache->brun  = stats.f_blocks * cache->brun_percent;

	_debug("limits {%llu,%llu,%llu} blocks",
	       (unsigned long long) cache->brun,
	       (unsigned long long) cache->bcull,
	       (unsigned long long) cache->bstop);

	/* get the cache directory and check its type */
	cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
	if (IS_ERR(cachedir)) {
		ret = PTR_ERR(cachedir);
		goto error_unsupported;
	}

	cache->store = cachedir;

	/* get the graveyard directory */
	graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
	if (IS_ERR(graveyard)) {
		ret = PTR_ERR(graveyard);
		goto error_unsupported;
	}

	cache->graveyard = graveyard;
	cache->cache = cache_cookie;

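	/* Register our operations table with the fscache core and bring the
	 * cache to the live state so that volumes and cookies can start to
	 * use it. */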
	ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
	if (ret < 0)
		goto error_add_cache;

	/* done */
	set_bit(CACHEFILES_READY, &cache->flags);
	dput(root);

	pr_info("File cache on %s registered\n", cache_cookie->name);

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
	cachefiles_end_secure(cache, saved_cred);
	_leave(" = 0 [%px]", cache->cache);
	return 0;

error_add_cache:
	cachefiles_put_directory(cache->graveyard);
	cache->graveyard = NULL;
error_unsupported:
	cachefiles_put_directory(cache->store);
	cache->store = NULL;
	mntput(cache->mnt);
	cache->mnt = NULL;
	dput(root);
error_open_root:
	cachefiles_end_secure(cache, saved_cred);
error_getsec:
	fscache_relinquish_cache(cache_cookie);
	cache->cache = NULL;
	pr_err("Failed to register: %d\n", ret);
	return ret;
}

/*
 * See if we have space for a number of pages and/or a number of files in the
 * cache
 */
int cachefiles_has_space(struct cachefiles_cache *cache,
			 unsigned fnr, unsigned bnr,
			 enum cachefiles_has_space_for reason)
{
	struct kstatfs stats;
	u64 b_avail, b_writing;
	int ret;

	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= cache->mnt->mnt_root,
	};

	//_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
	//       (unsigned long long) cache->frun,
	//       (unsigned long long) cache->fcull,
	//       (unsigned long long) cache->fstop,
	//       (unsigned long long) cache->brun,
	//       (unsigned long long) cache->bcull,
	//       (unsigned long long) cache->bstop,
	//       fnr, bnr);

	/* find out how much space the backing filesystem has available */
	memset(&stats, 0, sizeof(stats));

	ret = vfs_statfs(&path, &stats);
	if (ret < 0) {
		trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
					   cachefiles_trace_statfs_error);
		if (ret == -EIO)
			cachefiles_io_error(cache, "statfs failed");
		_leave(" = %d", ret);
		return ret;
	}

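	/* Convert the free block count to PAGE_SIZE blocks and deduct the
	 * blocks already committed to writes in progress, as they're not
	 * really available. */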
	b_avail = stats.f_bavail >> cache->bshift;
	b_writing = atomic_long_read(&cache->b_writing);
	if (b_avail > b_writing)
		b_avail -= b_writing;
	else
		b_avail = 0;

	//_debug("avail %llu,%llu",
	//       (unsigned long long)stats.f_ffree,
	//       (unsigned long long)b_avail);

	/* see if there is sufficient space */
	if (stats.f_ffree > fnr)
		stats.f_ffree -= fnr;
	else
		stats.f_ffree = 0;

	if (b_avail > bnr)
		b_avail -= bnr;
	else
		b_avail = 0;

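	/* Compare what would remain against the three sets of limits: below
	 * the stop limits the request is refused and culling is started;
	 * below the cull limits the request is granted but culling is
	 * started; at or above the run limits an active cull may cease. */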
	ret = -ENOBUFS;
	if (stats.f_ffree < cache->fstop ||
	    b_avail < cache->bstop)
		goto stop_and_begin_cull;

	ret = 0;
	if (stats.f_ffree < cache->fcull ||
	    b_avail < cache->bcull)
		goto begin_cull;

	if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
	    stats.f_ffree >= cache->frun &&
	    b_avail >= cache->brun &&
	    test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
	    ) {
		_debug("cease culling");
		cachefiles_state_changed(cache);
	}

	//_leave(" = 0");
	return 0;

stop_and_begin_cull:
	switch (reason) {
	case cachefiles_has_space_for_write:
		fscache_count_no_write_space();
		break;
	case cachefiles_has_space_for_create:
		fscache_count_no_create_space();
		break;
	default:
		break;
	}
begin_cull:
	if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
		_debug("### CULL CACHE ###");
		cachefiles_state_changed(cache);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Mark all the objects as being out of service and queue them all for cleanup.
 */
static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
{
	struct cachefiles_object *object;
	unsigned int count = 0;

	_enter("");

	spin_lock(&cache->object_list_lock);

	while (!list_empty(&cache->object_list)) {
		object = list_first_entry(&cache->object_list,
					  struct cachefiles_object, cache_link);
		cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
		list_del_init(&object->cache_link);
		fscache_withdraw_cookie(object->cookie);
		count++;
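		/* Drop the lock and reschedule periodically so that a long
		 * object list doesn't keep the CPU tied up. */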
		if ((count & 63) == 0) {
			spin_unlock(&cache->object_list_lock);
			cond_resched();
			spin_lock(&cache->object_list_lock);
		}
	}

	spin_unlock(&cache->object_list_lock);
	_leave(" [%u objs]", count);
}

/*
 * Withdraw volumes.
 */
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
	_enter("");

	for (;;) {
		struct cachefiles_volume *volume = NULL;

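		/* Detach one volume at a time under the lock, then withdraw it
		 * with the lock dropped. */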
		spin_lock(&cache->object_list_lock);
		if (!list_empty(&cache->volumes)) {
			volume = list_first_entry(&cache->volumes,
						  struct cachefiles_volume, cache_link);
			list_del_init(&volume->cache_link);
		}
		spin_unlock(&cache->object_list_lock);
		if (!volume)
			break;

		cachefiles_withdraw_volume(volume);
	}

	_leave("");
}

/*
 * Sync a cache to backing disk.
 */
static void cachefiles_sync_cache(struct cachefiles_cache *cache)
{
	const struct cred *saved_cred;
	int ret;

	_enter("%s", cache->cache->name);

	/* make sure all pages pinned by operations on behalf of the netfs are
	 * written to disk */
	cachefiles_begin_secure(cache, &saved_cred);
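	/* sync_filesystem() must be called with s_umount held; taking it
	 * shared also stops the backing fs being unmounted while we sync. */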
	down_read(&cache->mnt->mnt_sb->s_umount);
	ret = sync_filesystem(cache->mnt->mnt_sb);
	up_read(&cache->mnt->mnt_sb->s_umount);
	cachefiles_end_secure(cache, saved_cred);

	if (ret == -EIO)
		cachefiles_io_error(cache,
				    "Attempt to sync backing fs superblock returned error %d",
				    ret);
}

/*
 * Withdraw a cache from service.
 */
void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
{
	struct fscache_cache *fscache = cache->cache;

	pr_info("File cache on %s unregistering\n", fscache->name);

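	/* Tell the fscache core the cache is being withdrawn so that nothing
	 * new gets attached to it. */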
	fscache_withdraw_cache(fscache);

	/* we now have to destroy all the active objects pertaining to this
	 * cache - which we do by passing them off to the thread pool to be
	 * disposed of */
	cachefiles_withdraw_objects(cache);
	fscache_wait_for_objects(fscache);

	cachefiles_withdraw_volumes(cache);
	cachefiles_sync_cache(cache);
	cache->cache = NULL;
	fscache_relinquish_cache(fscache);
}