/*
 *  linux/fs/nfs/file.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 *  Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 *  Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 *  nfs regular file handling functions
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "delegation.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static int nfs_file_open(struct inode *, struct file *);
static int nfs_file_release(struct inode *, struct file *);
static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
static int nfs_file_flush(struct file *);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
static int nfs_check_flags(int flags);
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);

const struct file_operations nfs_file_operations = {
	.llseek		= nfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= nfs_file_read,
	.aio_write	= nfs_file_write,
	.mmap		= nfs_file_mmap,
	.open		= nfs_file_open,
	.flush		= nfs_file_flush,
	.release	= nfs_file_release,
	.fsync		= nfs_fsync,
	.lock		= nfs_lock,
	.flock		= nfs_flock,
	.sendfile	= nfs_file_sendfile,
	.check_flags	= nfs_check_flags,
};

struct inode_operations nfs_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
};

#ifdef CONFIG_NFS_V3
struct inode_operations nfs3_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs3_listxattr,
	.getxattr	= nfs3_getxattr,
	.setxattr	= nfs3_setxattr,
	.removexattr	= nfs3_removexattr,
};
#endif  /* CONFIG_NFS_V3 */

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

static int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	int res;

	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	lock_kernel();
	res = NFS_SERVER(inode)->rpc_ops->file_open(inode, filp);
	unlock_kernel();
	return res;
}

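/*
 * For illustration (paths below are made-up examples, not real mounts):
 * because of nfs_check_flags() above, an open such as
 *
 *	fd = open("/mnt/nfs/data", O_WRONLY | O_APPEND | O_DIRECT);
 *
 * fails with EINVAL before anything is sent to the server, since an
 * appending write cannot be combined with uncached direct I/O here.
 */
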
static int
nfs_file_release(struct inode *inode, struct file *filp)
{
	/* Ensure that dirty pages are flushed out with the right creds */
	if (filp->f_mode & FMODE_WRITE)
		filemap_fdatawrite(filp->f_mapping);
	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
	return NFS_PROTO(inode)->file_release(inode, filp);
}

/**
 * nfs_revalidate_file - Revalidate the page cache & related metadata
 * @inode - pointer to inode struct
 * @filp - pointer to file
 */
static int nfs_revalidate_file(struct inode *inode, struct file *filp)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int retval = 0;

	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR))
			|| nfs_attribute_timeout(inode))
		retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
	nfs_revalidate_mapping(inode, filp->f_mapping);
	return 0;
}

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode - pointer to inode struct
 * @filp - pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	if (server->flags & NFS_MOUNT_NOAC)
		goto force_reval;
	if (filp->f_flags & O_DIRECT)
		goto force_reval;
	if (nfsi->npages != 0)
		return 0;
	if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
		return 0;
force_reval:
	return __nfs_revalidate_inode(server, inode);
}

static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == 2) {
		struct inode *inode = filp->f_mapping->host;
		int retval = nfs_revalidate_file_size(inode, filp);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(filp, offset, origin);
}

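/*
 * For illustration: an lseek(fd, 0, SEEK_END) on an NFS file may go
 * through a GETATTR (via nfs_revalidate_file_size() above) before the
 * new offset is computed, so "end of file" reflects the server's idea
 * of the size unless we hold cached writes or the attribute cache is
 * still fresh.
 */
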
/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = file->f_dentry->d_inode;
	int		status;

	dfprintk(VFS, "nfs: flush(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;
	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	lock_kernel();
	/* Ensure that data+attribute caches are up to date after close() */
	status = nfs_wb_all(inode);
	if (!status) {
		status = ctx->error;
		ctx->error = 0;
		if (!status)
			nfs_revalidate_inode(NFS_SERVER(inode), inode);
	}
	unlock_kernel();
	return status;
}

static ssize_t
nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_read(iocb, buf, count, pos);
#endif

	dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	result = nfs_revalidate_file(inode, iocb->ki_filp);
	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
	if (!result)
		result = generic_file_aio_read(iocb, buf, count, pos);
	return result;
}

static ssize_t
nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
		read_actor_t actor, void *target)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	ssize_t res;

	dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long long) *ppos);

	res = nfs_revalidate_file(inode, filp);
	if (!res)
		res = generic_file_sendfile(filp, ppos, count, actor, target);
	return res;
}

static int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	int	status;

	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	status = nfs_revalidate_file(inode, file);
	if (!status)
		status = generic_file_mmap(file, vma);
	return status;
}

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = dentry->d_inode;
	int status;

	dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	lock_kernel();
	status = nfs_wb_all(inode);
	if (!status) {
		status = ctx->error;
		ctx->error = 0;
	}
	unlock_kernel();
	return status;
}

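/*
 * Rough sketch of how a deferred write error surfaces: a WRITE that fails
 * asynchronously (e.g. with ENOSPC on the server) is recorded in ctx->error
 * on the open context; the next flush (close()) or fsync() above then pushes
 * out the remaining dirty pages with nfs_wb_all() and hands that saved error
 * back to the caller, clearing it for the next attempt.
 */
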
/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, done all the page alignment stuff
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium..
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	return nfs_flush_incompatible(file, page);
}

static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	long status;

	lock_kernel();
	status = nfs_updatepage(file, page, offset, to-offset);
	unlock_kernel();
	return status;
}

static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
	/* FIXME: we really should cancel any unstarted writes on this page */
}

static int nfs_release_page(struct page *page, gfp_t gfp)
{
	return !nfs_wb_page(page->mapping->host, page);
}

struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.prepare_write = nfs_prepare_write,
	.commit_write = nfs_commit_write,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
	.direct_IO = nfs_direct_IO,
#endif
};

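/*
 * For orientation: a buffered write driven by generic_file_aio_write()
 * roughly runs, per page cache page,
 *
 *	a_ops->prepare_write()  -> nfs_prepare_write()  (flush writes that
 *	                           belong to a different open context)
 *	copy the user data into the page
 *	a_ops->commit_write()   -> nfs_commit_write()   (queue the dirty
 *	                           range via nfs_updatepage())
 *
 * The WRITE RPCs themselves are sent later by the writeback paths
 * (nfs_writepage/nfs_writepages) or forced out by nfs_wb_all().
 */
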
/*
 * Write to a file (through the page cache).
 */
static ssize_t
nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_write(iocb, buf, count, pos);
#endif

	dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		inode->i_ino, (unsigned long) count, (unsigned long) pos);

	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_filp->f_flags & O_APPEND) {
		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
		if (result)
			goto out;
	}
	nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);

	result = count;
	if (!count)
		goto out;

	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
	result = generic_file_aio_write(iocb, buf, count, pos);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}

static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct file_lock cfl;
	struct inode *inode = filp->f_mapping->host;
	int status = 0;

	lock_kernel();
	/* Try local locking first */
	if (posix_test_lock(filp, fl, &cfl)) {
		fl->fl_start = cfl.fl_start;
		fl->fl_end = cfl.fl_end;
		fl->fl_type = cfl.fl_type;
		fl->fl_pid = cfl.fl_pid;
		goto out;
	}

	if (nfs_have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	unlock_kernel();
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__FUNCTION__);
	return res;
}

static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	nfs_sync_mapping(filp->f_mapping);

	/* NOTE: special case
	 *	If we're signalled while cleaning up locks on process exit, we
	 *	still need to complete the unlock.
	 */
	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	return status;
}

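/*
 * Note on "-onolock": when the mount sets NFS_MOUNT_NONLM, the lock paths
 * in do_unlk() above and do_setlk() below never talk to the server's lock
 * manager; they fall back to do_vfs_lock(), so the locks are only visible
 * on this client. That keeps, for example, local fcntl()/flock() users
 * working against servers without NLM, at the cost of cross-client
 * exclusion.
 */
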
static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
		/* If we were signalled we still need to ensure that
		 * we clean up any state on the server. We therefore
		 * record the lock call as having succeeded in order to
		 * ensure that locks_remove_posix() cleans it out when
		 * the process exits.
		 */
		if (status == -EINTR || status == -ERESTARTSYS)
			do_vfs_lock(filp, fl);
	} else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	if (status < 0)
		goto out;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	nfs_zap_caches(inode);
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_mapping->host;

	dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
			inode->i_sb->s_id, inode->i_ino,
			fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);
	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return do_getlk(filp, cmd, fl);
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}

/*
 * Lock a (portion of) a file
 */
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	dprintk("NFS: nfs_flock(f=%s/%ld, t=%x, fl=%x)\n",
			filp->f_dentry->d_inode->i_sb->s_id,
			filp->f_dentry->d_inode->i_ino,
			fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* We're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t)filp;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}
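
/*
 * For illustration: a userspace flock(fd, LOCK_EX) on an NFS file arrives
 * here as an FL_FLOCK request and is rewritten by nfs_flock() into a
 * whole-file POSIX lock (fl_start = 0, fl_end = OFFSET_MAX, owned by the
 * struct file), so the server only ever sees byte-range locks.
 */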