/*
 *  linux/fs/nfs/file.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 *  Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 *  Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 *  nfs regular file handling functions
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static int nfs_file_open(struct inode *, struct file *);
static int nfs_file_release(struct inode *, struct file *);
static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
static int nfs_file_flush(struct file *);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
static int nfs_check_flags(int flags);
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);

struct file_operations nfs_file_operations = {
	.llseek		= remote_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= nfs_file_read,
	.aio_write	= nfs_file_write,
	.mmap		= nfs_file_mmap,
	.open		= nfs_file_open,
	.flush		= nfs_file_flush,
	.release	= nfs_file_release,
	.fsync		= nfs_fsync,
	.lock		= nfs_lock,
	.flock		= nfs_flock,
	.sendfile	= nfs_file_sendfile,
	.check_flags	= nfs_check_flags,
};

struct inode_operations nfs_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
};

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

static int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}
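
/*
 * Example (user-space sketch, hypothetical /mnt/nfs mount): since the check
 * above rejects O_APPEND combined with O_DIRECT, an open() that asks for
 * both is expected to fail with EINVAL:
 *
 *	int fd = open("/mnt/nfs/data", O_WRONLY | O_APPEND | O_DIRECT);
 *	if (fd < 0 && errno == EINVAL)
 *		fprintf(stderr, "O_APPEND|O_DIRECT not supported on NFS\n");
 */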

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int (*open)(struct inode *, struct file *);
	int res;

	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	lock_kernel();
	/* Do NFSv4 open() call */
	if ((open = server->rpc_ops->file_open) != NULL)
		res = open(inode, filp);
	unlock_kernel();
	return res;
}

static int
nfs_file_release(struct inode *inode, struct file *filp)
{
	/* Ensure that dirty pages are flushed out with the right creds */
	if (filp->f_mode & FMODE_WRITE)
		filemap_fdatawrite(filp->f_mapping);
	return NFS_PROTO(inode)->file_release(inode, filp);
}

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = file->f_dentry->d_inode;
	int		status;

	dfprintk(VFS, "nfs: flush(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;
	lock_kernel();
	/* Ensure that data+attribute caches are up to date after close() */
	status = nfs_wb_all(inode);
	if (!status) {
		status = ctx->error;
		ctx->error = 0;
		if (!status && !nfs_have_delegation(inode, FMODE_READ))
			__nfs_revalidate_inode(NFS_SERVER(inode), inode);
	}
	unlock_kernel();
	return status;
}
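
/*
 * Usage note (sketch, hypothetical fd on an NFS mount): ->flush is what
 * close(2) invokes, so write errors from earlier asynchronous writeback
 * (for example ENOSPC or EDQUOT recorded in ctx->error) are expected to
 * surface as the return value of close() rather than of write():
 *
 *	if (close(fd) < 0)
 *		perror("close");	-- may report a deferred NFS write error
 */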

static ssize_t
nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_read(iocb, buf, count, pos);
#endif

	dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (!result)
		result = generic_file_aio_read(iocb, buf, count, pos);
	return result;
}

static ssize_t
nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
		read_actor_t actor, void *target)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	ssize_t res;

	dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long long) *ppos);

	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (!res)
		res = generic_file_sendfile(filp, ppos, count, actor, target);
	return res;
}

static int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	int	status;

	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (!status)
		status = generic_file_mmap(file, vma);
	return status;
}
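
/*
 * Note on the pattern above: read, sendfile and mmap all call
 * nfs_revalidate_inode() before handing off to the generic page-cache
 * routines.  If the server's attributes show the file has changed, the
 * revalidation is expected to invalidate stale cached pages first, which
 * is what gives the read side its close-to-open cache consistency.
 */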

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = dentry->d_inode;
	int status;

	dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	lock_kernel();
	status = nfs_wb_all(inode);
	if (!status) {
		status = ctx->error;
		ctx->error = 0;
	}
	unlock_kernel();
	return status;
}
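
/*
 * Example (user-space sketch): NFS writes back asynchronously, so a
 * successful write() does not guarantee the data reached the server.  Any
 * deferred error is expected to be reported by a later fsync() or close().
 * A caller that needs the data committed would check both calls
 * (handle_write_failure() below is a hypothetical error handler):
 *
 *	if (write(fd, buf, len) < 0 || fsync(fd) < 0)
 *		handle_write_failure();
 */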

/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, done all the page alignment
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until it is done with the page.
 */
static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	return nfs_flush_incompatible(file, page);
}

static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	long status;

	lock_kernel();
	status = nfs_updatepage(file, page, offset, to-offset);
	unlock_kernel();
	return status;
}
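
/*
 * Sketch of how the hooks above are expected to be driven by the generic
 * buffered-write path (simplified, with error handling omitted; the
 * nfs_file_aops table below is what wires them in):
 *
 *	page = grab_cache_page(mapping, index);
 *	status = a_ops->prepare_write(file, page, offset, to);
 *	... copy the user data into the page ...
 *	status = a_ops->commit_write(file, page, offset, to);
 *
 * For NFS, prepare_write only flushes writes issued under a different open
 * context, and commit_write records the dirty range so it can be written
 * back (and committed) to the server later.
 */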

struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.prepare_write = nfs_prepare_write,
	.commit_write = nfs_commit_write,
#ifdef CONFIG_NFS_DIRECTIO
	.direct_IO = nfs_direct_IO,
#endif
};

/*
 * Write to a file (through the page cache).
 */
static ssize_t
nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_write(iocb, buf, count, pos);
#endif

	dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		inode->i_ino, (unsigned long) count, (unsigned long) pos);

	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (result)
		goto out;

	result = count;
	if (!count)
		goto out;

	result = generic_file_aio_write(iocb, buf, count, pos);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}

static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;

	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else {
		struct file_lock *cfl = posix_test_lock(filp, fl);

		fl->fl_type = F_UNLCK;
		if (cfl != NULL)
			memcpy(fl, cfl, sizeof(*fl));
	}
	unlock_kernel();
	return status;
}
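
/*
 * Mount-option note: NFS_MOUNT_NONLM corresponds to the "nolock" mount
 * option (spelled "-onolock" in the older mount syntax quoted above).
 * With it set, lock requests are satisfied purely on the client instead of
 * being sent to the server's lock manager, so processes on different
 * clients no longer see each other's locks; it is only safe when the file
 * is not shared across clients.
 */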

static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__FUNCTION__);
	return res;
}

static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	sigset_t oldset;
	int status;

	rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	filemap_fdatawrite(filp->f_mapping);
	down(&inode->i_sem);
	nfs_wb_all(inode);
	up(&inode->i_sem);
	filemap_fdatawait(filp->f_mapping);

	/* NOTE: special case
	 * 	If we're signalled while cleaning up locks on process exit, we
	 * 	still need to complete the unlock.
	 */
	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
	return status;
}

static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	sigset_t oldset;
	int status;

	rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = filemap_fdatawrite(filp->f_mapping);
	if (status == 0) {
		down(&inode->i_sem);
		status = nfs_wb_all(inode);
		up(&inode->i_sem);
		if (status == 0)
			status = filemap_fdatawait(filp->f_mapping);
	}
	if (status < 0)
		goto out;

	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
		/* If we were signalled we still need to ensure that
		 * we clean up any state on the server. We therefore
		 * record the lock call as having succeeded in order to
		 * ensure that locks_remove_posix() cleans it out when
		 * the process exits.
		 */
		if (status == -EINTR || status == -ERESTARTSYS)
			do_vfs_lock(filp, fl);
	} else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	if (status < 0)
		goto out;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	filemap_fdatawrite(filp->f_mapping);
	down(&inode->i_sem);
	nfs_wb_all(inode);	/* we may have slept */
	up(&inode->i_sem);
	filemap_fdatawait(filp->f_mapping);
	nfs_zap_caches(inode);
out:
	rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
	return status;
}
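
/*
 * Example (user-space sketch, two clients sharing one NFS file): because
 * do_setlk() flushes dirty pages and zaps the cache around the lock call,
 * taking an fcntl() lock is expected to act as a coherency point, so reads
 * made while holding the lock see data the other client wrote before
 * releasing its own lock:
 *
 *	struct flock lk = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLKW, &lk);	-- flush and cache invalidation here
 *	... read or update the shared region ...
 *	lk.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &lk);
 */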

/*
 * Lock a (portion of) a file
 */
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_mapping->host;

	if (!inode)
		return -EINVAL;

	dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
			inode->i_sb->s_id, inode->i_ino,
			fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	/* No mandatory locks over NFS (setgid bit set with the group-execute
	 * bit clear marks the file for mandatory locking) */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return do_getlk(filp, cmd, fl);
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}

/*
 * Lock a (portion of) a file
 */
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_mapping->host;

	if (!inode)
		return -EINVAL;

	dprintk("NFS: nfs_flock(f=%s/%ld, t=%x, fl=%x)\n",
			inode->i_sb->s_id, inode->i_ino,
			fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* We're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t)filp;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}
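
/*
 * Example (user-space sketch, hypothetical file on an NFS mount): an
 * flock() lock taken here is forwarded to the server as a whole-file
 * POSIX-style lock owned by this struct file, so it is expected to
 * conflict with flock() calls made on other clients for the same file:
 *
 *	int fd = open("/mnt/nfs/shared.lock", O_RDWR);
 *	if (flock(fd, LOCK_EX) == 0) {
 *		... exclusive across NFS clients, not just this host ...
 *		flock(fd, LOCK_UN);
 *	}
 */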
485