/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}
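/*
 * put_dreq() drops one outstanding-I/O reference; a non-zero return
 * means this was the last I/O in flight, and the caller is now
 * responsible for completing the nfs_direct_req.
 */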
static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_dirty_pages(struct page **pages, int npages)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (!PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void nfs_direct_release_pages(struct page **pages, int npages)
{
	int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_interruptible(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	kref_put(&dreq->kref, nfs_direct_req_release);
}
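/*
 * Typical life cycle of a synchronous direct request:
 *
 *	dreq = nfs_direct_req_alloc();
 *	dreq->inode = inode;
 *	dreq->ctx = get_nfs_open_context(...);
 *	nfs_direct_read_schedule(dreq, user_addr, count, pos);
 *	result = nfs_direct_wait(dreq);
 *
 * Every RPC dispatched by the scheduler takes a reference with
 * get_dreq(); the completion callback that drops the last reference
 * via put_dreq() calls nfs_direct_complete(), which wakes the waiter
 * (or calls aio_complete() for asynchronous requests).
 */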
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	nfs_direct_dirty_pages(data->pagevec, data->npages);
	nfs_direct_release_pages(data->pagevec, data->npages);

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	spin_unlock(&dreq->lock);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	get_dreq(dreq);

	do {
		struct nfs_read_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(rsize,count);

		result = -ENOMEM;
		data = nfs_readdata_alloc(pgbase + bytes);
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (unlikely(result < data->npages)) {
			if (result > 0)
				nfs_direct_release_pages(data->pagevec, result);
			nfs_readdata_release(data);
			break;
		}

		get_dreq(dreq);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		rpc_execute(&data->task);

		dprintk("NFS: %5u initiated direct read call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this unnecessary math from final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);

	if (started)
		return 0;
	return result < 0 ? (ssize_t) result : -EFAULT;
}
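/*
 * Issue the READ requests for a single direct read and, for
 * synchronous callers, wait for the whole transfer to finish.
 * Signals are masked with rpc_clnt_sigmask() for the duration so
 * that the wait honours the mount's interrupt semantics.
 */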
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	while (!list_empty(&dreq->rewrite_list)) {
		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_direct_release_pages(data->pagevec, data->npages);
		nfs_writedata_release(data);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;
	struct list_head *p;
	struct nfs_write_data *data;

	dreq->count = 0;
	get_dreq(dreq);

	list_for_each(p, &dreq->rewrite_list) {
		data = list_entry(p, struct nfs_write_data, pages);

		get_dreq(dreq);

		/*
		 * Reset data->res.
		 */
		nfs_fattr_init(&data->fattr);
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		/*
		 * Reuse data->task; data->args should not have changed
		 * since the original request was sent.
		 */
		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		/*
		 * We're called via an RPC callback, so BKL is already held.
		 */
		rpc_execute(&data->task);

		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				data->args.count,
				(unsigned long long)data->args.offset);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dreq->error = task->tk_status;
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}
	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_write_data *data = dreq->commit_data;

	data->inode = dreq->inode;
	data->cred = dreq->ctx->cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
				&nfs_commit_direct_ops, data);
	NFS_PROTO(data->inode)->commit_setup(data, 0);

	data->task.tk_priority = RPC_PRIORITY_NORMAL;
	data->task.tk_cookie = (unsigned long)data->inode;
	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	rpc_execute(&data->task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_end_data_update(inode);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_zap_mapping(inode, inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_end_data_update(inode);
	nfs_direct_free_writedata(dreq);
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif
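/*
 * Completion handler for a single direct WRITE.  The first UNSTABLE
 * reply records the server's write verifier and arms a later COMMIT;
 * if a subsequent reply carries a different verifier (usually a sign
 * that the server rebooted), the writes are rescheduled as
 * FLUSH_STABLE instead.
 */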
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (unlikely(status < 0)) {
		dreq->error = status;
		goto out_unlock;
	}

	dreq->count += data->res.count;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			case 0:
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
out_unlock:
	spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	get_dreq(dreq);

	do {
		struct nfs_write_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(wsize,count);

		result = -ENOMEM;
		data = nfs_writedata_alloc(pgbase + bytes);
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 0, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (unlikely(result < data->npages)) {
			if (result > 0)
				nfs_direct_release_pages(data->pagevec, result);
			nfs_writedata_release(data);
			break;
		}

		get_dreq(dreq);

		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, sync);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		rpc_execute(&data->task);

		dprintk("NFS: %5u initiated direct write call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this useless math from the final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);

	if (started)
		return 0;
	return result < 0 ? (ssize_t) result : -EFAULT;
}
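/*
 * Set up and issue a direct write.  If no commit_data could be
 * allocated, or if the request fits in a single wsize'd WRITE, the
 * data is sent FLUSH_STABLE so that no separate COMMIT is needed;
 * otherwise the WRITEs go out UNSTABLE and are committed (or resent)
 * from nfs_direct_write_complete().
 */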
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;
	nfs_alloc_commit_data(dreq);

	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
	if (!result)
		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
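/*
 * The two entry points below are reached from the NFS read and write
 * file operations when a file has been opened with O_DIRECT.  They
 * validate the (currently single-segment) iovec, write back and
 * invalidate any cached pages via nfs_sync_mapping(), and then hand
 * the transfer to the engines above.
 */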
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	/* XXX: temporary */
	const char __user *buf = iov[0].iov_base;
	size_t count = iov[0].iov_len;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (nr_segs != 1)
		return -EINVAL;

	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	/* XXX: temporary */
	const char __user *buf = iov[0].iov_base;
	size_t count = iov[0].iov_len;

	dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (nr_segs != 1)
		return -EINVAL;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}