xref: /linux/fs/9p/vfs_file.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs file ops for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *writeback_fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_fid_put(fid);
			return err;
		}
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);

		file->private_data = fid;
	}

	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache) && !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * Clone a fid and add it as the writeback_fid. We do this
		 * at open time instead of at page-dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		writeback_fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(writeback_fid)) {
			err = PTR_ERR(writeback_fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) writeback_fid;
	}
	mutex_unlock(&v9inode->v_mutex);
#ifdef CONFIG_9P_FSCACHE
	if (v9ses->cache == CACHE_FSCACHE)
		fscache_use_cookie(v9fs_inode_cookie(v9inode),
				   file->f_mode & FMODE_WRITE);
#endif
	v9fs_open_fid_add(inode, &fid);
	return 0;
out_error:
	p9_fid_put(file->private_data);
	file->private_data = NULL;
	return err;
}

/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local-only lock; we should extend it into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return 0;
}

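/**
 * v9fs_file_do_lock - take a POSIX lock locally and propagate it to the server
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * The lock is taken locally first and then sent to the server as a TLOCK
 * request; if the server rejects it, the local lock is reverted.
 */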
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * If it is a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * In case the server returned an error for the lock request,
	 * revert it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}

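/**
 * v9fs_file_getlock - test for a conflicting POSIX lock
 * @filp: file to be tested
 * @fl: file lock structure to fill in with a conflicting lock, if any
 *
 * Checks for a local conflict first and only queries the server with
 * TGETLOCK when no local conflict is found.
 */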
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * If we have a conflicting lock locally, there is no need to
	 * validate it with the server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = -glock.proc_id;
	}
out:
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
	return ret;
}

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
	struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct p9_fid *fid = iocb->ki_filp->private_data;
	int ret, err = 0;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
		 iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_filp->f_flags & O_NONBLOCK)
		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
	else
		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	if (!ret)
		return err;

	iocb->ki_pos += ret;
	return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;

		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}

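/**
 * v9fs_file_fsync - flush a file's dirty data to the server
 * @filp: file to be synced
 * @start: start offset of the range to sync
 * @end: end offset of the range to sync
 * @datasync: only flush data, not metadata (ignored here)
 *
 * Writes back the given range of the page cache and then sends a blank
 * wstat to ask the server to flush the file.
 */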
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
	v9fs_blank_wstat(&wstat);

	retval = p9_client_wstat(fid, &wstat);
	inode_unlock(inode);

	return retval;
}

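/**
 * v9fs_file_fsync_dotl - flush a file's dirty data to the server (9P2000.L)
 * @filp: file to be synced
 * @start: start offset of the range to sync
 * @end: end offset of the range to sync
 * @datasync: only flush data, not metadata, when set
 *
 * Writes back the given range of the page cache and then sends an fsync
 * request to the server.
 */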
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	inode_unlock(inode);

	return retval;
}

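/**
 * v9fs_file_mmap - map a file into memory (cached mode)
 * @filp: file to be mapped
 * @vma: vm area describing the mapping
 *
 * Uses generic_file_mmap() and installs v9fs_file_vm_ops so that writable
 * faults go through v9fs_vm_page_mkwrite().
 */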
static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

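/**
 * v9fs_mmap_file_mmap - map a file into memory (mmap cache mode)
 * @filp: file to be mapped
 * @vma: vm area describing the mapping
 *
 * For shared writable mappings a writeback fid is set up first so that
 * dirty pages can still be written back after the file is unlinked.
 */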
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_SHARED) &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * Clone a fid and add it as the writeback_fid. We do this
		 * at mmap time instead of at page-dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}

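/**
 * v9fs_vm_page_mkwrite - notification that a page is about to become writable
 * @vmf: fault information
 *
 * Waits for any write to the cache to finish, updates the file times and
 * returns with the folio locked so that it can safely be made dirty.
 */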
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct folio *folio = page_folio(vmf->page);
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
		 folio, (unsigned long)filp->private_data);

	v9inode = V9FS_I(inode);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		return VM_FAULT_NOPAGE;
#endif

	/* Update file times before taking page lock */
	file_update_time(filp);

	BUG_ON(!v9inode->writeback_fid);
	if (folio_lock_killable(folio) < 0)
		return VM_FAULT_RETRY;
	if (folio_mapping(folio) != inode->i_mapping)
		goto out_unlock;
	folio_wait_stable(folio);

	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}

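/**
 * v9fs_mmap_vm_close - write back a shared mapping before it is torn down
 * @vma: vm area being closed
 *
 * For shared mappings, synchronously writes back the byte range covered by
 * the VMA so that no dirty pages outlive the mapping.
 */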
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing\n", vma);

	inode = file_inode(vma->vm_file);
	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
}

static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};