xref: /linux/fs/9p/cache.c (revision ebf68996de0ab250c5d520eb2291ab65643e9a1e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * V9FS cache definitions.
 *
 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN  11

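/*
 * The netfs definition handed to the FS-Cache core when 9p caching is set
 * up; its primary index is the root under which the session and inode
 * cookies below are acquired.
 */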
struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - generate a random tag for a new cache session
 * @v9ses: session information
 *
 * The value of jiffies is used as a fairly random cache tag.
 */
static int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

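/**
 * v9fs_cache_session_get_cookie - acquire an FS-Cache cookie for a session
 * @v9ses: session information
 *
 * Acquires an index cookie keyed by the session's cache tag, generating a
 * random tag first if none was specified.  On failure the session is left
 * without a cookie and caching is simply not used.
 */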
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses->cachetag,
						strlen(v9ses->cachetag),
						NULL, 0,
						v9ses, 0, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

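/**
 * v9fs_cache_session_put_cookie - release a session's FS-Cache cookie
 * @v9ses: session information
 *
 * Relinquishes the session index cookie without retiring the cached data
 * and clears the reference held in the session structure.
 */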
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
	v9ses->fscache = NULL;
}

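/**
 * v9fs_cache_inode_check_aux - check the auxiliary data for an inode cookie
 * @cookie_netfs_data: the v9fs_inode the cookie belongs to
 * @buffer: auxiliary data stored in the cache
 * @buflen: length of @buffer
 * @object_size: size of the cached object (unused here)
 *
 * Compares the cached qid version against the inode's current one; any
 * mismatch marks the cache object obsolete so stale data is discarded.
 */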
static enum fscache_checkaux
v9fs_cache_inode_check_aux(void *cookie_netfs_data,
			   const void *buffer,
			   uint16_t buflen,
			   loff_t object_size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= v9fs_cache_inode_check_aux,
};

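/**
 * v9fs_cache_inode_get_cookie - acquire an FS-Cache cookie for an inode
 * @inode: inode to acquire a cookie for
 *
 * Acquires a data-file cookie under the session's index for regular files
 * only, keyed by qid.path, with qid.version as the auxiliary data used for
 * coherency checking.  Does nothing if a cookie is already held.
 */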
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

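/**
 * v9fs_cache_inode_put_cookie - release an inode's FS-Cache cookie
 * @inode: inode whose cookie is being released
 *
 * Relinquishes the cookie, handing the current qid version back to
 * FS-Cache as updated auxiliary data, and keeps the cached contents.
 */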
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
				  false);
	v9inode->fscache = NULL;
}

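/**
 * v9fs_cache_inode_flush_cookie - discard an inode's cached data
 * @inode: inode whose cache object is being discarded
 *
 * Relinquishes the cookie with the retire flag set, so FS-Cache drops the
 * object and its backing data rather than keeping it for later reuse.
 */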
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
	v9inode->fscache = NULL;
}

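/**
 * v9fs_cache_inode_set_cookie - adjust caching when a file is opened
 * @inode: inode being opened
 * @filp: file being opened on that inode
 *
 * Under the inode's fscache_lock: an open that permits writing flushes
 * (retires) the cache object, while a read-only open (re)acquires a cookie
 * so reads can be served from the cache.
 */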
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

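/**
 * v9fs_cache_inode_reset_cookie - throw away and reacquire an inode cookie
 * @inode: inode whose cache object should be reset
 *
 * Retires the existing cookie and immediately acquires a fresh one for the
 * same qid, effectively invalidating whatever was cached for the inode.
 */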
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, NULL, true);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

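/**
 * __v9fs_fscache_release_page - ask FS-Cache whether a page may be released
 * @page: page the VM wants to release
 * @gfp: allocation flags governing how hard the cache may try
 *
 * Returns the result of fscache_maybe_release_page(): true if the cache
 * gave up its interest and the page can be freed, false otherwise.
 */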
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

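/**
 * __v9fs_fscache_invalidate_page - remove a page from the cache
 * @page: page being invalidated
 *
 * Waits for any outstanding write of this page to the cache to finish and
 * then uncaches it, so the cache no longer refers to soon-to-be-stale data.
 */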
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

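/*
 * Completion callback for fscache_read_or_alloc_page{,s}(): mark the page
 * up to date if the read from the cache succeeded, then unlock it.
 */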
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from the cache
 * @inode: inode the page belongs to
 * @page: page to fill from the cache
 *
 * Returns 0 if the page is in the cache and a BIO is submitted,
 * 1 if the page is not in the cache, and -error otherwise.
 */
int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from the cache
 * @inode: inode the pages belong to
 * @mapping: address space the pages are being read into
 * @pages: list of pages to attempt to fill from the cache
 * @nr_pages: number of pages on @pages; updated as pages are consumed
 *
 * Returns 0 if the pages are in the cache and BIOs are submitted,
 * 1 if the pages are not in the cache, and -error otherwise.
 */
int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inode not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 * @inode: inode the page belongs to
 * @page: page to store in the cache
 *
 * Stores a freshly read page in the cache; if the write cannot be queued,
 * the page is uncached again.
 */
void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page,
				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * Wait for a page to finish being written to the cache.
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}
337