/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
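
/*
 * For reference, a minimal sketch (not part of this file) of how a layout
 * driver module might register itself with the table above. The "example_"
 * callbacks and the LAYOUT_EXAMPLE id are hypothetical; a real driver must
 * at least supply alloc_lseg and free_lseg, as enforced by
 * pnfs_register_layoutdriver():
 *
 *	static struct pnfs_layoutdriver_type example_layoutdriver = {
 *		.id		= LAYOUT_EXAMPLE,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_layoutdriver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&example_layoutdriver);
 *	}
 */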

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;

	put_rpccred(lo->plh_lc_cred);
	if (ld->free_layout_hdr)
		ld->free_layout_hdr(lo);
	else
		kfree(lo);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
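
/*
 * Worked example of the half-open [offset, offset + length) convention
 * used above (the numbers are illustrative, not from the protocol): a
 * range with offset = 0 and length = NFS4_MAX_UINT64 covers the whole
 * file, because end_offset() saturates the overflowing sum to
 * NFS4_MAX_UINT64. A range {offset = 4096, length = 4096} spans bytes
 * 4096..8191, so lo_seg_contained() reports that it contains
 * {offset = 4096, length = 1} but not {offset = 8192, length = 1}.
 */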

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		/* Reset MDS Threshold I/O counters */
		NFS_I(lo->plh_inode)->write_io = 0;
		NFS_I(lo->plh_inode)->read_io = 0;
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if ((int)(newseq - oldseq) > 0) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
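
/*
 * Worked example of the serial-number arithmetic used above (the numbers
 * are illustrative only). With newseq = 5 and oldseq = 0xffffffff, the
 * unsigned difference newseq - oldseq is 6, which is positive when cast
 * to int, so seqid 5 correctly counts as "more recent" across the wrap.
 * For the barrier: with newseq = 0x60000005 and plh_barrier = 3, the
 * difference newseq - plh_barrier exceeds 3 << 29 (0x60000000), so the
 * barrier is pulled forward to newseq - (1 << 30) = 0x20000005, keeping
 * it within 2**30 of the current seqid.
 */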

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || pnfs_test_layout_returned(lo)) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	pnfs_mark_layout_returned(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		pnfs_clear_layout_returned(lo);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
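
/*
 * Worked example of the sort order cmp_layout() produces (the values are
 * illustrative only): given segments A = {offset 0, length 100, RW},
 * B = {offset 0, length 100, READ} and C = {offset 0, length 50, READ},
 * pnfs_insert_layout() below keeps them ordered A, B, C. Equal offsets
 * and lengths fall through to the iomode rule, which sorts RW ahead of
 * READ (A before B), and the longer length sorts B ahead of C.
 */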

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
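
/*
 * Worked example (the numbers are illustrative only): suppose the
 * server's mdsthreshold hint sets rd_sz = 1048576 and rd_io_sz = 65536,
 * so both THRESHOLD_RD and THRESHOLD_RD_IO are set in t->bm. For a READ
 * on a 4 MB file with 128 KB accumulated in nfsi->read_io, both tests
 * meet or exceed their thresholds, so the function returns false and the
 * I/O may go over pNFS. On a 512 KB file with only 16 KB of cumulative
 * read I/O, both fall below their thresholds, so it returns true and the
 * read is sent to the MDS instead.
 */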

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		return NULL;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;

	/* Enable LAYOUTRETURNs */
	pnfs_clear_layout_returned(lo);

	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed. Mark
		 * the overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_write_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_writedata_release(data);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_RW) {
		dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
	} else {
		dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
	}
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}
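
/*
 * A worked example of the rule described below (illustrative only, for
 * the files layout): if the server hands out a layout without
 * NFL4_UFLG_COMMIT_THRU_MDS and the client writes 64 KB at offset 0 as
 * UNSTABLE4 to a data server and then COMMITs to that data server, it
 * must also send LAYOUTCOMMIT to the MDS (here with lastbytewritten =
 * 65535) so the metadata server can update the file's size and change
 * attribute. Had the WRITEs gone through the MDS, or had the flag been
 * set, no LAYOUTCOMMIT would be needed for this purpose.
 */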

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}