/* xref: /linux/fs/nfs/fscache.h (revision c61fea676bcb5f14adcd882a7f7d9c5b082fe922) */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* NFS filesystem cache interface definitions
3  *
4  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #ifndef _NFS_FSCACHE_H
9 #define _NFS_FSCACHE_H
10 
11 #include <linux/swap.h>
12 #include <linux/nfs_fs.h>
13 #include <linux/nfs_mount.h>
14 #include <linux/nfs4_mount.h>
15 #include <linux/fscache.h>
16 #include <linux/iversion.h>
17 
18 #ifdef CONFIG_NFS_FSCACHE
19 
20 /*
21  * Definition of the auxiliary data attached to NFS inode storage objects
22  * within the cache.
23  *
24  * The contents of this struct are recorded in the on-disk local cache in the
25  * auxiliary data attached to the data storage object backing an inode.  This
26  * permits coherency to be managed when a new inode binds to an already extant
27  * cache object.
28  */
struct nfs_fscache_inode_auxdata {
	s64	mtime_sec;	/* inode mtime, seconds part */
	s64	mtime_nsec;	/* inode mtime, nanoseconds part */
	s64	ctime_sec;	/* inode ctime, seconds part */
	s64	ctime_nsec;	/* inode ctime, nanoseconds part */
	u64	change_attr;	/* raw change attribute; only filled in for NFSv4, else 0 */
};
36 
/* Per-subrequest state shared by all the RPCs servicing one netfs read. */
struct nfs_netfs_io_data {
	/*
	 * NFS may split a netfs_io_subrequest into multiple RPCs, each
	 * with their own read completion.  In netfs, we can only call
	 * netfs_subreq_terminated() once for each subrequest.  Use the
	 * refcount here to double as a marker of the last RPC completion,
	 * and only call netfs via netfs_subreq_terminated() once.
	 */
	refcount_t			refcount;
	struct netfs_io_subrequest	*sreq;	/* the subrequest being serviced */

	/*
	 * Final disposition of the netfs_io_subrequest, sent in
	 * netfs_subreq_terminated()
	 */
	atomic64_t	transferred;	/* bytes accumulated across the RPCs */
	int		error;		/* error to report instead of a byte count; 0 if none */
};
55 
/* Take a reference for one more in-flight RPC on this subrequest's I/O data. */
static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
{
	refcount_inc(&netfs->refcount);
}
60 
61 static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
62 {
63 	ssize_t final_len;
64 
65 	/* Only the last RPC completion should call netfs_subreq_terminated() */
66 	if (!refcount_dec_and_test(&netfs->refcount))
67 		return;
68 
69 	/*
70 	 * The NFS pageio interface may read a complete page, even when netfs
71 	 * only asked for a partial page.  Specifically, this may be seen when
72 	 * one thread is truncating a file while another one is reading the last
73 	 * page of the file.
74 	 * Correct the final length here to be no larger than the netfs subrequest
75 	 * length, and thus avoid netfs's "Subreq overread" warning message.
76 	 */
77 	final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
78 	netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
79 	kfree(netfs);
80 }
/* Initialise the netfs context embedded in a freshly set-up NFS inode. */
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
}
87 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
88 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
89 extern int nfs_netfs_folio_unlock(struct folio *folio);
90 
91 /*
92  * fscache.c
93  */
94 extern int nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
95 extern void nfs_fscache_release_super_cookie(struct super_block *);
96 
97 extern void nfs_fscache_init_inode(struct inode *);
98 extern void nfs_fscache_clear_inode(struct inode *);
99 extern void nfs_fscache_open_file(struct inode *, struct file *);
100 extern void nfs_fscache_release_file(struct inode *, struct file *);
101 extern int nfs_netfs_readahead(struct readahead_control *ractl);
102 extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
103 
104 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
105 {
106 	if (folio_test_private_2(folio)) { /* [DEPRECATED] */
107 		if (current_is_kswapd() || !(gfp & __GFP_FS))
108 			return false;
109 		folio_wait_private_2(folio);
110 	}
111 	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
112 	return true;
113 }
114 
115 static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
116 					      struct inode *inode)
117 {
118 	memset(auxdata, 0, sizeof(*auxdata));
119 	auxdata->mtime_sec  = inode_get_mtime(inode).tv_sec;
120 	auxdata->mtime_nsec = inode_get_mtime(inode).tv_nsec;
121 	auxdata->ctime_sec  = inode_get_ctime(inode).tv_sec;
122 	auxdata->ctime_nsec = inode_get_ctime(inode).tv_nsec;
123 
124 	if (NFS_SERVER(inode)->nfs_client->rpc_ops->version == 4)
125 		auxdata->change_attr = inode_peek_iversion_raw(inode);
126 }
127 
128 /*
129  * Invalidate the contents of fscache for this inode.  This will not sleep.
130  */
131 static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
132 {
133 	struct nfs_fscache_inode_auxdata auxdata;
134 	struct fscache_cookie *cookie =  netfs_i_cookie(&NFS_I(inode)->netfs);
135 
136 	nfs_fscache_update_auxdata(&auxdata, inode);
137 	fscache_invalidate(cookie, &auxdata, i_size_read(inode), flags);
138 }
139 
140 /*
141  * indicate the client caching state as readable text
142  */
143 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
144 {
145 	if (server->fscache)
146 		return "yes";
147 	return "no ";
148 }
149 
/* Copy the netfs I/O data pointer from the pageio descriptor to the header. */
static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc)
{
	hdr->netfs = desc->pg_netfs;
}
/* Copy the netfs I/O data pointer from the header to the pageio descriptor. */
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr)
{
	desc->pg_netfs = hdr->netfs;
}
/* Detach any netfs I/O data from the pageio descriptor. */
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
{
	desc->pg_netfs = NULL;
}
164 #else /* CONFIG_NFS_FSCACHE */
/* Stubs for !CONFIG_NFS_FSCACHE: the netfs hooks do nothing. */
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
static inline int nfs_netfs_folio_unlock(struct folio *folio)
{
	/* Nothing holds the folio; report it as unlockable. */
	return 1;
}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}

static inline void nfs_fscache_init_inode(struct inode *inode) {}
static inline void nfs_fscache_clear_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
					 struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
/* Without fscache the netfs read paths are unavailable; -ENOBUFS presumably
 * makes the caller fall back to the plain NFS read path — see callers.
 */
static inline int nfs_netfs_readahead(struct readahead_control *ractl)
{
	return -ENOBUFS;
}
static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	return -ENOBUFS;
}
187 
static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
	/* No cache means nothing pins the folio. */
	return true; /* may release folio */
}
static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}

static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
	/* Same padded string as the fscache-enabled variant returns. */
	return "no ";
}
static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc) {}
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
203 #endif /* CONFIG_NFS_FSCACHE */
204 #endif /* _NFS_FSCACHE_H */
205