/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_atomic();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&q.key);
	} while (atomic_read(&c->io_count) != 0 && !ret);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 * @nonblock - if true don't block waiting for lock
 *
 * this lock must be held if modifying the page group list
 *
 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 * result from wait_on_bit_lock
 *
 * NOTE: calling with nonblock=false should always have set the
 * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 */
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	if (!nonblock)
		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
					TASK_UNINTERRUPTIBLE);

	return -EAGAIN;
}

/*
 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 * @req - a request in the group
 *
 * This is a blocking call to wait for the group lock to be cleared.
 */
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
		    TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req, false);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
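
/*
 * Illustrative sketch only: this helper is not part of the file and is
 * never called; its name is made up for the example.  It shows the
 * locking convention used above: any walk of the wb_this_page ring must
 * be bracketed by nfs_page_group_lock()/nfs_page_group_unlock() on a
 * request in the group.
 */
static bool __maybe_unused
nfs_page_group_example_all_set(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *tmp = req;
	bool ret = true;

	nfs_page_group_lock(req, false);	/* blocking variant */
	do {
		if (!test_bit(bit, &tmp->wb_flags)) {
			ret = false;
			break;
		}
		tmp = tmp->wb_this_page;
	} while (tmp != req);
	nfs_page_group_unlock(req);
	return ret;
}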

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *   or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref if head request has extra ref from
		 * the write/commit path to handle handoff between write
		 * and commit lists */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	/* subrequests must release the ref on the head request */
	if (req->wb_head != req)
		nfs_release_request(req->wb_head);

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index	= page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free up all resources held by an NFS read/write request
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
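
/*
 * Illustrative sketch only: a hypothetical caller that is not used
 * anywhere in this file (the real users are the read and write paths in
 * fs/nfs/read.c and fs/nfs/write.c).  It shows the basic life cycle of
 * an nfs_page: create -> hand off for I/O -> release.
 */
static int __maybe_unused
nfs_page_example_lifecycle(struct nfs_open_context *ctx, struct page *page)
{
	struct nfs_page *req;

	/* nfs_create_request() requires that the caller holds the page lock */
	req = nfs_create_request(ctx, page, NULL, 0, PAGE_CACHE_SIZE);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ...the request would normally be added to a pageio descriptor... */

	/* drop the reference taken by kref_init() in nfs_create_request() */
	nfs_release_request(req);
	return 0;
}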

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the number of bytes of @req that can be coalesced.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	if (desc->pg_count > desc->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = count;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = hdr->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->task.tk_pid,
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	if (hdr->rw_ops->rw_release)
		hdr->rw_ops->rw_release(hdr);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_page *req;
	struct page **pages,
		    *last_page;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount, pageused;

	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
		return nfs_pgio_error(desc, hdr);

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (WARN_ON_ONCE(pageused >= pagecount))
			return nfs_pgio_error(desc, hdr);

		if (!last_page || last_page != req->wb_page) {
			*pages++ = last_page = req->wb_page;
			pageused++;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount))
		return nfs_pgio_error(desc, hdr);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr, desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
		const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		if (req->wb_context->dentry->d_inode->i_flock != NULL &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_page *prev = NULL;
	if (desc->pg_count != 0) {
		prev = nfs_list_entry(desc->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;
		desc->pg_moreio = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
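
/*
 * Illustrative sketch only: a hypothetical caller, never used in this
 * file (the real users are fs/nfs/read.c, fs/nfs/write.c and
 * nfs_pageio_resend() below).  It shows the usual descriptor life
 * cycle: init -> add each request -> complete.  All "example"
 * identifiers are made up for the example.
 */
static int __maybe_unused
nfs_pageio_example_flush(struct inode *inode, struct list_head *reqs,
			 const struct nfs_pageio_ops *pg_ops,
			 const struct nfs_pgio_completion_ops *compl_ops,
			 const struct nfs_rw_ops *rw_ops)
{
	struct nfs_pageio_descriptor desc;

	nfs_pageio_init(&desc, inode, pg_ops, compl_ops, rw_ops,
			NFS_SERVER(inode)->wsize, 0);
	while (!list_empty(reqs)) {
		struct nfs_page *req = nfs_list_entry(reqs->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&desc, req))
			return desc.pg_error < 0 ? desc.pg_error : -EIO;
	}
	/* send whatever has been coalesced but not yet submitted */
	nfs_pageio_complete(&desc);
	return 0;
}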

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};