/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request failed: since generic_file_read
 * only checks the return value of inode->i_op->readpage() which is always 0
 * for async RPC, we set the error bit of the page to 1 when an error occurs,
 * and make nfs_readpage transmit requests synchronously when encountering this.
 * This is only a small problem, though, since we now retry all operations
 * within the RPC code when root squashing is suspected.
 */

#include <linux/config.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ   (32)

/*
 * Allocate a read descriptor from the mempool, with room for 'pagecount'
 * page pointers.  The small built-in array is used when it is big enough,
 * otherwise a separate page vector is allocated.
 */
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
        struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                if (pagecount < NFS_PAGEVEC_SIZE)
                        p->pagevec = &p->page_array[0];
                else {
                        size_t size = ++pagecount * sizeof(struct page *);
                        p->pagevec = kmalloc(size, GFP_NOFS);
                        if (p->pagevec) {
                                memset(p->pagevec, 0, size);
                        } else {
                                mempool_free(p, nfs_rdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}

void nfs_readdata_free(struct nfs_read_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
        nfs_readdata_free(data);
}

/*
 * Return the number of bytes of valid data on 'page', based on the cached
 * value of i_size; zero if the page lies entirely beyond end-of-file.
 */
static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
        loff_t i_size = i_size_read(inode);
        unsigned long idx;

        if (i_size <= 0)
                return 0;
        idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (page->index > idx)
                return 0;
        if (page->index != idx)
                return PAGE_CACHE_SIZE;
        return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}

/*
 * Zero-fill a page that lies beyond the cached end-of-file and mark it
 * up to date without issuing any RPC.
 */
static
int nfs_return_empty_page(struct page *page)
{
        memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        unsigned int rsize = NFS_SERVER(inode)->rsize;
        unsigned int count = PAGE_CACHE_SIZE;
        int result;
        struct nfs_read_data *rdata;

        rdata = nfs_readdata_alloc(1);
        if (!rdata)
                return -ENOMEM;

        memset(rdata, 0, sizeof(*rdata));
        rdata->flags = (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
        rdata->cred = ctx->cred;
        rdata->inode = inode;
        INIT_LIST_HEAD(&rdata->pages);
        rdata->args.fh = NFS_FH(inode);
        rdata->args.context = ctx;
        rdata->args.pages = &page;
        rdata->args.pgbase = 0UL;
        rdata->args.count = rsize;
        rdata->res.fattr = &rdata->fattr;

        dprintk("NFS: nfs_readpage_sync(%p)\n", page);

        /*
         * This works now because the socket layer never tries to DMA
         * into this buffer directly.
         */
        do {
                if (count < rsize)
                        rdata->args.count = count;
                rdata->res.count = rdata->args.count;
                rdata->args.offset = page_offset(page) + rdata->args.pgbase;

                dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
                        NFS_SERVER(inode)->hostname,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        (unsigned long long)rdata->args.pgbase,
                        rdata->args.count);

                lock_kernel();
                result = NFS_PROTO(inode)->read(rdata);
                unlock_kernel();

                /*
                 * Even if we had a partial success we can't mark the page
                 * cache valid.
                 */
                if (result < 0) {
                        if (result == -EISDIR)
                                result = -EINVAL;
                        goto io_error;
                }
                count -= result;
                rdata->args.pgbase += result;
                nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);

                /* Note: result == 0 should only happen if we're caching
                 * a write that extends the file and punches a hole.
                 */
                if (rdata->res.eof != 0 || result == 0)
                        break;
        } while (count);
        spin_lock(&inode->i_lock);
        NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
        spin_unlock(&inode->i_lock);

        if (count)
                memclear_highpage_flush(page, rdata->args.pgbase, count);
        SetPageUptodate(page);
        if (PageError(page))
                ClearPageError(page);
        result = 0;

io_error:
        unlock_page(page);
        nfs_readdata_free(rdata);
        return result;
}

/*
 * Queue an asynchronous read of a single page.
 */
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        LIST_HEAD(one_request);
        struct nfs_page *new;
        unsigned int len;

        len = nfs_page_length(inode, page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

        nfs_list_add_request(new, &one_request);
        nfs_pagein_one(&one_request, inode);
        return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_clear_request(req);
        nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset)
{
        struct inode *inode;
        int flags;

        data->req = req;
        data->inode = inode = req->wb_context->dentry->d_inode;
        data->cred = req->wb_context->cred;

        data->args.fh = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pagevec;
        data->args.count = count;
        data->args.context = req->wb_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct. */
        flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
        rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
        NFS_PROTO(inode)->read_setup(data);

        data->task.tk_cookie = (unsigned long)inode;

        dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        count,
                        (unsigned long long)data->args.offset);
}

/*
 * Release all requests on the list, marking their pages in error.
 */
static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                SetPageError(req->wb_page);
                nfs_readpage_release(req);
        }
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
        struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
        sigset_t oldset;

        rpc_clnt_sigmask(clnt, &oldset);
        lock_kernel();
        rpc_execute(&data->task);
        unlock_kernel();
        rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
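 *
 * When the server's rsize is smaller than a page, the request's byte range
 * is split into rsize-sized chunks, one nfs_read_data per chunk;
 * req->wb_complete counts the chunks still outstanding, so the page is not
 * unlocked and marked up to date until the last partial read completes.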
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        unsigned int rsize = NFS_SERVER(inode)->rsize;
        unsigned int nbytes, offset;
        int requests = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = req->wb_bytes;
        for (;;) {
                data = nfs_readdata_alloc(1);
                if (!data)
                        goto out_bad;
                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, &list);
                requests++;
                if (nbytes <= rsize)
                        break;
                nbytes -= rsize;
        }
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = req->wb_bytes;
        do {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes > rsize) {
                        nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
                                        rsize, offset);
                        offset += rsize;
                        nbytes -= rsize;
                } else {
                        nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
                                        nbytes, offset);
                        nbytes = 0;
                }
                nfs_execute_read(data);
        } while (nbytes != 0);

        return 0;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del(&data->pages);
                nfs_readdata_free(data);
        }
        SetPageError(page);
        nfs_readpage_release(req);
        return -ENOMEM;
}

/*
 * Coalesce all requests on the list into a single RPC.  If the server's
 * rsize is smaller than a page, fall back to nfs_pagein_multi(), which
 * splits each page into several smaller reads.
 */
static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_read_data *data;
        unsigned int count;

        if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(head, inode);

        data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages);
        if (!data)
                goto out_bad;

        INIT_LIST_HEAD(&data->pages);
        pages = data->pagevec;
        count = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
                count += req->wb_bytes;
        }
        req = nfs_list_entry(data->pages.next);

        nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);

        nfs_execute_read(data);
        return 0;
out_bad:
        nfs_async_read_error(head);
        return -ENOMEM;
}

/*
 * Coalesce the request list into batches of at most 'rpages' pages and
 * submit each batch with nfs_pagein_one().  Returns the number of pages
 * submitted, or a negative error code.
 */
static int
nfs_pagein_list(struct list_head *head, int rpages)
{
        LIST_HEAD(one_request);
        struct nfs_page *req;
        int error = 0;
        unsigned int pages = 0;

        while (!list_empty(head)) {
                pages += nfs_coalesce_requests(head, &one_request, rpages);
                req = nfs_list_entry(one_request.next);
                error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
                if (error < 0)
                        break;
        }
        if (error >= 0)
                return pages;

        nfs_async_read_error(head);
        return error;
}

/*
 * Handle a read reply that fills part of a page.
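 *
 * A short read zeroes the remainder of the requested byte range; the page
 * itself is marked up to date only once all of its outstanding partial
 * requests have completed without error (wb_complete drops to zero).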
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status >= 0) {
                unsigned int request = data->args.count;
                unsigned int result = data->res.count;

                if (result < request) {
                        memclear_highpage_flush(page,
                                        data->args.pgbase + result,
                                        request - result);
                }
        } else
                SetPageError(page);

        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!PageError(page))
                        SetPageUptodate(page);
                nfs_readpage_release(req);
        }
}

static const struct rpc_call_ops nfs_read_partial_ops = {
        .rpc_call_done = nfs_readpage_result_partial,
        .rpc_release = nfs_readdata_release,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        unsigned int count = data->res.count;

        if (nfs_readpage_result(task, data) != 0)
                return;
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);
                struct page *page = req->wb_page;
                nfs_list_remove_request(req);

                if (task->tk_status >= 0) {
                        if (count < PAGE_CACHE_SIZE) {
                                if (count < req->wb_bytes)
                                        memclear_highpage_flush(page,
                                                        req->wb_pgbase + count,
                                                        req->wb_bytes - count);
                                count = 0;
                        } else
                                count -= PAGE_CACHE_SIZE;
                        SetPageUptodate(page);
                } else
                        SetPageError(page);
                nfs_readpage_release(req);
        }
}

static const struct rpc_call_ops nfs_read_full_ops = {
        .rpc_call_done = nfs_readpage_result_full,
        .rpc_release = nfs_readdata_release,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;
        int status;

        dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
                task->tk_pid, task->tk_status);

        status = NFS_PROTO(data->inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);

        /* Is this a short read? */
        if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
                nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
                /* Has the server at least made some progress? */
                if (resp->count != 0) {
                        /* Yes, so retry the read at the end of the data */
                        argp->offset += resp->count;
                        argp->pgbase += resp->count;
                        argp->count -= resp->count;
                        rpc_restart_call(task);
                        return -EAGAIN;
                }
                task->tk_status = -EIO;
        }
        spin_lock(&data->inode->i_lock);
        NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
        spin_unlock(&data->inode->i_lock);
        return 0;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following cases:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
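 *  -	The inode is flagged for synchronous I/O (IS_SYNC(inode)); in that
 *	case nfs_readpage_sync() below is used instead of the async path.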
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_error;

        if (file == NULL) {
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        return -EBADF;
        } else
                ctx = get_nfs_open_context((struct nfs_open_context *)
                                file->private_data);
        if (!IS_SYNC(inode)) {
                error = nfs_readpage_async(ctx, inode, page);
                goto out;
        }

        error = nfs_readpage_sync(ctx, inode, page);
        if (error < 0 && IS_SWAPFILE(inode))
                printk("Aiee.. nfs swap-in of page failed!\n");
out:
        put_nfs_open_context(ctx);
        return error;

out_error:
        unlock_page(page);
        return error;
}

struct nfs_readdesc {
        struct list_head *head;
        struct nfs_open_context *ctx;
};

/*
 * read_cache_pages() callback: turn each page into an nfs_page read
 * request and queue it on desc->head for later submission.
 */
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;

        nfs_wb_page(inode, page);
        len = nfs_page_length(inode, page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                SetPageError(page);
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
        nfs_list_add_request(new, desc->head);
        return 0;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        LIST_HEAD(head);
        struct nfs_readdesc desc = {
                .head = &head,
        };
        struct inode *inode = mapping->host;
        struct nfs_server *server = NFS_SERVER(inode);
        int ret;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context((struct nfs_open_context *)
                                filp->private_data);
        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
        if (!list_empty(&head)) {
                int err = nfs_pagein_list(&head, server->rpages);
                if (!ret)
                        nfs_add_stats(inode, NFSIOS_READPAGES, err);
                ret = err;
        }
        put_nfs_open_context(desc.ctx);
        return ret;
}

int nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
                                                     nfs_rdata_cachep);
        if (nfs_rdata_mempool == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        mempool_destroy(nfs_rdata_mempool);
        if (kmem_cache_destroy(nfs_rdata_cachep))
                printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
}
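
/*
 * Note: nfs_init_readpagecache() and nfs_destroy_readpagecache() are
 * expected to be called from the NFS client's module init/exit path
 * (init_nfs_fs()/exit_nfs_fs() in fs/nfs/inode.c in kernels of this
 * vintage), so the slab cache and mempool above live for the lifetime
 * of the nfs module.
 */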