/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request failed: since generic_file_read
 * only checks the return value of inode->i_op->readpage() which is always 0
 * for async RPC, we set the error bit of the page to 1 when an error occurs,
 * and make nfs_readpage transmit requests synchronously when encountering this.
 * This is only a small problem, though, since we now retry all operations
 * within the RPC code when root squashing is suspected.
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

struct nfs_read_data *nfs_readdata_alloc(size_t len)
{
        unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
                                mempool_free(p, nfs_rdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}

static void nfs_readdata_free(struct nfs_read_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
        nfs_readdata_free(data);
}

static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
        loff_t i_size = i_size_read(inode);
        unsigned long idx;

        if (i_size <= 0)
                return 0;
        idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (page->index > idx)
                return 0;
        if (page->index != idx)
                return PAGE_CACHE_SIZE;
        return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}
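
/*
 * Worked example for nfs_page_length() above (illustrative numbers, not
 * from the original source, assuming a 4096-byte PAGE_CACHE_SIZE): with
 * i_size = 10000, idx = 9999 >> 12 = 2, so pages 0 and 1 return 4096,
 * page 2 (the last page inside i_size) returns 1 + (9999 & 4095) = 1808,
 * and any page beyond index 2 returns 0.
 */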

static
int nfs_return_empty_page(struct page *page)
{
        memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
        unsigned int remainder = data->args.count - data->res.count;
        unsigned int base = data->args.pgbase + data->res.count;
        unsigned int pglen;
        struct page **pages;

        if (data->res.eof == 0 || remainder == 0)
                return;
        /*
         * Note: "remainder" can never be negative, since we check for
         * this in the XDR code.
         */
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        pglen = PAGE_CACHE_SIZE - base;
        for (;;) {
                if (remainder <= pglen) {
                        memclear_highpage_flush(*pages, base, remainder);
                        break;
                }
                memclear_highpage_flush(*pages, base, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_CACHE_SIZE;
                base = 0;
        }
}
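
/*
 * Worked example for nfs_readpage_truncate_uninitialised_page() above
 * (illustrative numbers, not from the original source, assuming a
 * 4096-byte PAGE_CACHE_SIZE): for a two-page request with
 * args.pgbase = 0 and args.count = 8192, a server reply of
 * res.count = 5000 with res.eof set leaves remainder = 3192 and
 * base = 5000; the loop then clears 3192 bytes starting at offset
 * 5000 & 4095 = 904 within the second page.
 */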

/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        unsigned int rsize = NFS_SERVER(inode)->rsize;
        unsigned int count = PAGE_CACHE_SIZE;
        int result;
        struct nfs_read_data *rdata;

        rdata = nfs_readdata_alloc(count);
        if (!rdata)
                return -ENOMEM;

        memset(rdata, 0, sizeof(*rdata));
        rdata->flags = (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
        rdata->cred = ctx->cred;
        rdata->inode = inode;
        INIT_LIST_HEAD(&rdata->pages);
        rdata->args.fh = NFS_FH(inode);
        rdata->args.context = ctx;
        rdata->args.pages = &page;
        rdata->args.pgbase = 0UL;
        rdata->args.count = rsize;
        rdata->res.fattr = &rdata->fattr;

        dprintk("NFS: nfs_readpage_sync(%p)\n", page);

        /*
         * This works now because the socket layer never tries to DMA
         * into this buffer directly.
         */
        do {
                if (count < rsize)
                        rdata->args.count = count;
                rdata->res.count = rdata->args.count;
                rdata->args.offset = page_offset(page) + rdata->args.pgbase;

                dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
                        NFS_SERVER(inode)->hostname,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        (unsigned long long)rdata->args.pgbase,
                        rdata->args.count);

                lock_kernel();
                result = NFS_PROTO(inode)->read(rdata);
                unlock_kernel();

                /*
                 * Even if we had a partial success we can't mark the page
                 * cache valid.
                 */
                if (result < 0) {
                        if (result == -EISDIR)
                                result = -EINVAL;
                        goto io_error;
                }
                count -= result;
                rdata->args.pgbase += result;
                nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);

                /* Note: result == 0 should only happen if we're caching
                 * a write that extends the file and punches a hole.
                 */
                if (rdata->res.eof != 0 || result == 0)
                        break;
        } while (count);
        spin_lock(&inode->i_lock);
        NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
        spin_unlock(&inode->i_lock);

        if (rdata->res.eof || rdata->res.count == rdata->args.count) {
                SetPageUptodate(page);
                if (rdata->res.eof && count != 0)
                        memclear_highpage_flush(page, rdata->args.pgbase, count);
        }
        result = 0;

io_error:
        unlock_page(page);
        nfs_readdata_free(rdata);
        return result;
}

static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        LIST_HEAD(one_request);
        struct nfs_page *new;
        unsigned int len;

        len = nfs_page_length(inode, page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

        nfs_list_add_request(new, &one_request);
        nfs_pagein_one(&one_request, inode);
        return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_clear_request(req);
        nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset)
{
        struct inode *inode;
        int flags;

        data->req = req;
        data->inode = inode = req->wb_context->dentry->d_inode;
        data->cred = req->wb_context->cred;

        data->args.fh = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pagevec;
        data->args.count = count;
        data->args.context = req->wb_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct. */
        flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
        rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
        NFS_PROTO(inode)->read_setup(data);

        data->task.tk_cookie = (unsigned long)inode;

        dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        count,
                        (unsigned long long)data->args.offset);
}

static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                SetPageError(req->wb_page);
                nfs_readpage_release(req);
        }
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
        struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
        sigset_t oldset;

        rpc_clnt_sigmask(clnt, &oldset);
        lock_kernel();
        rpc_execute(&data->task);
        unlock_kernel();
        rpc_clnt_sigunmask(clnt, &oldset);
}
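
/*
 * Worked example for nfs_pagein_multi() below (illustrative numbers, not
 * from the original source): with PAGE_CACHE_SIZE = 4096 and a server
 * rsize of 1024, a single-page request is split into four nfs_read_data
 * structures covering offsets 0, 1024, 2048 and 3072 within the page;
 * req->wb_complete is set to 4, and the page is only marked uptodate
 * once the last of the four replies has come back.
 */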

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire. If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated. This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
        unsigned int offset;
        int requests = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = req->wb_bytes;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(len);
                if (!data)
                        goto out_bad;
                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, &list);
                requests++;
                nbytes -= len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = req->wb_bytes;
        do {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes > rsize) {
                        nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
                                        rsize, offset);
                        offset += rsize;
                        nbytes -= rsize;
                } else {
                        nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
                                        nbytes, offset);
                        nbytes = 0;
                }
                nfs_execute_read(data);
        } while (nbytes != 0);

        return 0;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del(&data->pages);
                nfs_readdata_free(data);
        }
        SetPageError(page);
        nfs_readpage_release(req);
        return -ENOMEM;
}

static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_read_data *data;
        unsigned int count;

        if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(head, inode);

        data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
        if (!data)
                goto out_bad;

        INIT_LIST_HEAD(&data->pages);
        pages = data->pagevec;
        count = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
                count += req->wb_bytes;
        }
        req = nfs_list_entry(data->pages.next);

        nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);

        nfs_execute_read(data);
        return 0;
out_bad:
        nfs_async_read_error(head);
        return -ENOMEM;
}

static int
nfs_pagein_list(struct list_head *head, int rpages)
{
        LIST_HEAD(one_request);
        struct nfs_page *req;
        int error = 0;
        unsigned int pages = 0;

        while (!list_empty(head)) {
                pages += nfs_coalesce_requests(head, &one_request, rpages);
                req = nfs_list_entry(one_request.next);
                error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
                if (error < 0)
                        break;
        }
        if (error >= 0)
                return pages;

        nfs_async_read_error(head);
        return error;
}
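
/*
 * Completion note for the partial-read path (a summary of the code above
 * and below, not new behaviour): every sub-request created by
 * nfs_pagein_multi() is accounted for in req->wb_complete, and
 * nfs_readpage_result_partial() decrements that counter as each RPC
 * completes; only the final decrement marks the page uptodate (provided
 * no sub-request failed) and releases the request.
 */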

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;

        if (likely(task->tk_status >= 0))
                nfs_readpage_truncate_uninitialised_page(data);
        else
                SetPageError(page);
        if (nfs_readpage_result(task, data) != 0)
                return;
        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!PageError(page))
                        SetPageUptodate(page);
                nfs_readpage_release(req);
        }
}

static const struct rpc_call_ops nfs_read_partial_ops = {
        .rpc_call_done = nfs_readpage_result_partial,
        .rpc_release = nfs_readdata_release,
};

static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
        unsigned int count = data->res.count;
        unsigned int base = data->args.pgbase;
        struct page **pages;

        if (data->res.eof)
                count = data->args.count;
        if (unlikely(count == 0))
                return;
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        count += base;
        for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageUptodate(*pages);
        if (count != 0)
                SetPageUptodate(*pages);
}

static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
{
        unsigned int count = data->args.count;
        unsigned int base = data->args.pgbase;
        struct page **pages;

        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        count += base;
        for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageError(*pages);
        if (count != 0)
                SetPageError(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        /*
         * Note: nfs_readpage_result may change the values of
         * data->args. In the multi-page case, we therefore need
         * to ensure that we call nfs_readpage_set_pages_uptodate()
         * first.
         */
        if (likely(task->tk_status >= 0)) {
                nfs_readpage_truncate_uninitialised_page(data);
                nfs_readpage_set_pages_uptodate(data);
        } else
                nfs_readpage_set_pages_error(data);
        if (nfs_readpage_result(task, data) != 0)
                return;
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);

                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

static const struct rpc_call_ops nfs_read_full_ops = {
        .rpc_call_done = nfs_readpage_result_full,
        .rpc_release = nfs_readdata_release,
};
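
/*
 * Worked example for the short-read handling in nfs_readpage_result()
 * below (illustrative numbers, not from the original source): for a READ
 * of args.count = 4096 the server may legitimately return
 * resp->count = 1024 with eof clear; in that case offset and pgbase are
 * advanced by 1024, count drops to 3072, and rpc_restart_call() reissues
 * the request for the remainder. A zero-byte reply without eof makes no
 * progress and is turned into -EIO.
 */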

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;
        int status;

        dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
                task->tk_pid, task->tk_status);

        status = NFS_PROTO(data->inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);

        /* Is this a short read? */
        if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
                nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
                /* Has the server at least made some progress? */
                if (resp->count != 0) {
                        /* Yes, so retry the read at the end of the data */
                        argp->offset += resp->count;
                        argp->pgbase += resp->count;
                        argp->count -= resp->count;
                        rpc_restart_call(task);
                        return -EAGAIN;
                }
                task->tk_status = -EIO;
        }
        spin_lock(&data->inode->i_lock);
        NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
        spin_unlock(&data->inode->i_lock);
        return 0;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_error;

        if (file == NULL) {
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        return -EBADF;
        } else
                ctx = get_nfs_open_context((struct nfs_open_context *)
                                file->private_data);
        if (!IS_SYNC(inode)) {
                error = nfs_readpage_async(ctx, inode, page);
                goto out;
        }

        error = nfs_readpage_sync(ctx, inode, page);
        if (error < 0 && IS_SWAPFILE(inode))
                printk("Aiee.. nfs swap-in of page failed!\n");
out:
        put_nfs_open_context(ctx);
        return error;

out_error:
        unlock_page(page);
        return error;
}
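
/*
 * Readahead note (a summary of the code below, not new behaviour): for
 * the ->readpages() path, readpage_async_filler() turns each page handed
 * to us by read_cache_pages() into an nfs_page request on a private
 * list, and nfs_pagein_list() then coalesces that list into RPCs of at
 * most server->rpages pages each via nfs_coalesce_requests().
 */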

struct nfs_readdesc {
        struct list_head *head;
        struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;

        nfs_wb_page(inode, page);
        len = nfs_page_length(inode, page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                SetPageError(page);
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
        nfs_list_add_request(new, desc->head);
        return 0;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        LIST_HEAD(head);
        struct nfs_readdesc desc = {
                .head = &head,
        };
        struct inode *inode = mapping->host;
        struct nfs_server *server = NFS_SERVER(inode);
        int ret;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context((struct nfs_open_context *)
                                filp->private_data);
        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
        if (!list_empty(&head)) {
                int err = nfs_pagein_list(&head, server->rpages);
                if (!ret)
                        nfs_add_stats(inode, NFSIOS_READPAGES, err);
                ret = err;
        }
        put_nfs_open_context(desc.ctx);
        return ret;
}

int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
                                                     nfs_rdata_cachep);
        if (nfs_rdata_mempool == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        mempool_destroy(nfs_rdata_mempool);
        if (kmem_cache_destroy(nfs_rdata_cachep))
                printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
}
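
/*
 * Allocation note (a summary, not new behaviour): nfs_init_readpagecache()
 * backs nfs_readdata_alloc() with a slab cache plus a mempool holding at
 * least MIN_POOL_READ preallocated entries, so reads can keep making
 * progress under memory pressure; nfs_destroy_readpagecache() should only
 * run once all nfs_read_data structures have been freed, as the printk
 * above warns.
 */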