xref: /linux/fs/nfs/fscache.c (revision 6e94dbc7a4e49a028b81302d755bba1a518f973b)
// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_FSCACHE

static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);

/*
 * Layout of the key for an NFS server cache object.
 */
struct nfs_server_key {
	struct {
		uint16_t	nfsversion;		/* NFS protocol version */
		uint32_t	minorversion;		/* NFSv4 minor version */
		uint16_t	family;			/* address family */
		__be16		port;			/* IP port */
	} hdr;
	union {
		struct in_addr	ipv4_addr;	/* IPv4 address */
		struct in6_addr ipv6_addr;	/* IPv6 address */
	};
} __packed;
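/*
 * The structure is packed so its layout carries no compiler-inserted padding:
 * the raw bytes are handed to fscache verbatim as the server index key, so
 * the layout must be stable and fully initialised.
 */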

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
	struct nfs_server_key key;
	uint16_t len = sizeof(key.hdr);

	memset(&key, 0, sizeof(key));
	key.hdr.nfsversion = clp->rpc_ops->version;
	key.hdr.minorversion = clp->cl_minorversion;
	key.hdr.family = clp->cl_addr.ss_family;

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		key.hdr.port = sin->sin_port;
		key.ipv4_addr = sin->sin_addr;
		len += sizeof(key.ipv4_addr);
		break;

	case AF_INET6:
		key.hdr.port = sin6->sin6_port;
		key.ipv6_addr = sin6->sin6_addr;
		len += sizeof(key.ipv6_addr);
		break;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		clp->fscache = NULL;
		return;
	}

	/* create a per-server index under which to store superblock cookies */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      &key, len,
					      NULL, 0,
					      clp, 0, true);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}

/*
 * Dispose of a per-client cookie
 */
void nfs_fscache_release_client_cookie(struct nfs_client *clp)
{
	dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);

	fscache_relinquish_cookie(clp->fscache, NULL, false);
	clp->fscache = NULL;
}

/*
 * Get the cache cookie for an NFS superblock.  We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff;

	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	if (!uniq) {
		uniq = "";
		ulen = 1;
	}

	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
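	/* Look for an existing key that matches this superblock.  Entries are
	 * ordered by nfs_client pointer, then by the fixed parameter block,
	 * then by the uniquifier; an exact match on all three means another
	 * superblock already owns this cache key. */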
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       &key->key,
					       sizeof(key->key) + ulen,
					       NULL, 0,
					       nfss, 0, true);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

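/* Another active superblock is already using this key.  Sharing one cache
 * object between two live superblocks could not be kept coherent, so this
 * mount simply goes uncached. */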
non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS: Cache request denied due to non-unique superblock keys\n");
}

/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);

	fscache_relinquish_cookie(nfss->fscache, NULL, false);
	nfss->fscache = NULL;

	if (nfss->fscache_key) {
		spin_lock(&nfs_fscache_keys_lock);
		rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
		spin_unlock(&nfs_fscache_keys_lock);
		kfree(nfss->fscache_key);
		nfss->fscache_key = NULL;
	}
}

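/*
 * Build the auxiliary (coherency) data that fscache stores with the cache
 * object.  The times (and, for v4, the change attribute) are compared with
 * the stored copy when the object is looked up so that a stale cache object
 * is invalidated rather than reused.
 */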
static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
				  struct nfs_inode *nfsi)
{
	memset(auxdata, 0, sizeof(*auxdata));
	auxdata->mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
	auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
	auxdata->ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
	auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;

	if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
		auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, nfsi);

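	/* The cookie is keyed by filehandle and acquired in the disabled
	 * state (the final argument is false); nfs_fscache_open_file() will
	 * enable it later if the file is opened for reading only. */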
	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       &nfs_fscache_inode_object_def,
					       nfsi->fh.data, nfsi->fh.size,
					       &auxdata, sizeof(auxdata),
					       nfsi, nfsi->vfs_inode.i_size, false);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);

	nfs_fscache_update_auxdata(&auxdata, nfsi);
	fscache_relinquish_cookie(cookie, &auxdata, false);
	nfsi->fscache = NULL;
}

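/*
 * Callback handed to fscache_enable_cookie(): enabling only goes ahead if
 * no one has the file open for writing, closing the race with a writer that
 * opens the file while enablement is in progress.
 */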
static bool nfs_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	return !inode_is_open_for_write(inode);
}

/*
 * Enable or disable caching, as appropriate, for a file that is being opened.
 * The cookie is allocated when the inode is initialised, but is not enabled
 * at that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	nfs_fscache_update_auxdata(&auxdata, nfsi);

	if (inode_is_open_for_write(inode)) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, &auxdata, true);
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
				      nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);

		BUG_ON(!cookie);
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, NFS_I(page->mapping->host));

		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		nfs_inc_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED);
	}

	return 1;
}

/*
 * Release the caching state associated with a page that is undergoing
 * complete page invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_inc_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED);
}

/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* only mark the page uptodate and unlock it if the read from the
	 * cache completed successfully */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	}
}

/*
 * Retrieve a page from fscache
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

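	/* Map the fscache result onto this function's return value: 0 means
	 * a read was dispatched to the cache (the completion handler will
	 * unlock the page), 1 means the caller must read from the server,
	 * and anything negative is an error. */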
	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache: BIO submitted\n");
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS:    readpage_from_fscache %d\n", ret);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
	}
	return ret;
}

/*
 * Retrieve a set of pages from fscache
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	unsigned npages = *nr_pages;
	int ret;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 nfs_i_fscache(inode), npages, inode);

	ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
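	/* Pages that the cache will handle have been removed from @pages and
	 * *nr_pages has been decremented to match; whatever remains on the
	 * list must be read from the server by the caller. */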
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");

		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
	}

	return ret;
}

/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, sync);

	ret = fscache_write_page(nfs_i_fscache(inode), page,
				 inode->i_size, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);

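	/* If the write could not be queued, uncache the page so the cache no
	 * longer tracks it, and account the failure. */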
	if (ret != 0) {
		fscache_uncache_page(nfs_i_fscache(inode), page);
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
	} else {
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
	}
}