// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains VFS address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

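	/* Read from the server into the inode's pagecache, then report either
	 * the number of bytes transferred or the error to netfs.
	 */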
	total = p9_client_read(fid, pos, &to, &err);
	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

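	/* Take a reference on the fid for the duration of the request; it is
	 * dropped (and the fid clunked) by v9fs_req_cleanup().
	 */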
	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}

/**
 * v9fs_req_cleanup - Clean up a request initialised by v9fs_init_rreq
 * @mapping: unused mapping of request to clean up
 * @priv: private data to clean up, a fid, guaranteed non-NULL
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

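	/* Caching is only usable when the cookie is enabled and has a cache
	 * object attached.
	 */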
	return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
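	/* -ENOBUFS tells netfs that there is no cache available and that it
	 * should read from the server directly.
	 */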
	return -ENOBUFS;
#endif
}

static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq		= v9fs_init_rreq,
	.is_cache_enabled	= v9fs_is_cache_enabled,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_op		= v9fs_req_issue_op,
	.cleanup		= v9fs_req_cleanup,
};

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: The file being read
 * @page: The page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}

/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
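	/* If the folio is being written to the cache, we can only wait for
	 * that to finish if the caller is allowed to block and to recurse
	 * into filesystem code.
	 */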
	if (folio_test_fscache(folio)) {
		if (!gfpflags_allow_blocking(gfp) || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 * @page: The page to be invalidated
 * @offset: offset of the invalidated region
 * @length: length of the invalidated region
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

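	/* Wait for any in-flight write to the cache covering this folio to
	 * finish before the folio is invalidated.
	 */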
	folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

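	/* If the write to the cache failed for any reason other than a lack
	 * of space (-ENOBUFS), the cached copy may now be stale: invalidate
	 * it, keyed on the current version of the inode.
	 */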
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

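	/* Don't write beyond EOF: clamp the length to the part of the folio
	 * that lies below i_size.
	 */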
	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

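	/* Wait for any pending write to the cache on this folio to finish
	 * before starting writeback.
	 */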
	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

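	/* If the server write succeeded and this inode is being cached, copy
	 * the data to the cache as well; v9fs_write_to_cache_done() deals
	 * with any failure of that copy.
	 */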
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
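		/* On a transient failure, redirty the folio so that writeback
		 * retries it later; otherwise record the error on the mapping.
		 */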
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else {
		retval = 0;
	}

	folio_unlock(folio);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

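	/* If the folio is dirty, write it back to the server while it is
	 * still locked, then wait for any outstanding write to the cache.
	 */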
	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

266 
267 /**
268  * v9fs_direct_IO - 9P address space operation for direct I/O
269  * @iocb: target I/O control block
270  * @iter: The data/buffer to use
271  *
272  * The presence of v9fs_direct_IO() in the address space ops vector
273  * allowes open() O_DIRECT flags which would have failed otherwise.
274  *
275  * In the non-cached mode, we shunt off direct read and write requests before
276  * the VFS gets them, so this method should never be called.
277  *
278  * Direct IO is not 'yet' supported in the cached mode. Hence when
279  * this routine is called through generic_file_aio_read(), the read/write fails
280  * with an error.
281  *
282  */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

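			/* If the write extended the file, account for the
			 * extra bytes.
			 */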
			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

305 
306 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
307 			    loff_t pos, unsigned int len, unsigned int flags,
308 			    struct page **subpagep, void **fsdata)
309 {
310 	int retval;
311 	struct folio *folio;
312 	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
313 
314 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
315 
316 	BUG_ON(!v9inode->writeback_fid);
317 
318 	/* Prefetch area to be written into the cache if we're caching this
319 	 * file.  We need to do this before we get a lock on the page in case
320 	 * there's more than one writer competing for the same cache block.
321 	 */
322 	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
323 				   &v9fs_req_ops, NULL);
324 	if (retval < 0)
325 		return retval;
326 
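	/* The caller expects a page; hand back the folio's head page. */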
	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

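	/* A short copy into a folio that wasn't already uptodate cannot be
	 * committed; returning 0 tells the caller to redo the copy.
	 */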
	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page)
{
	struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);

	return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#endif

const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = v9fs_set_page_dirty,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};