1 /* 2 * linux/fs/nfs/blocklayout/blocklayout.c 3 * 4 * Module for the NFSv4.1 pNFS block layout driver. 5 * 6 * Copyright (c) 2006 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Andy Adamson <andros@citi.umich.edu> 10 * Fred Isaman <iisaman@umich.edu> 11 * 12 * permission is granted to use, copy, create derivative works and 13 * redistribute this software and such derivative works for any purpose, 14 * so long as the name of the university of michigan is not used in 15 * any advertising or publicity pertaining to the use or distribution 16 * of this software without specific, written prior authorization. if 17 * the above copyright notice or any other identification of the 18 * university of michigan is included in any copy of any portion of 19 * this software, then the disclaimer below must also be included. 20 * 21 * this software is provided as is, without representation from the 22 * university of michigan as to its fitness for any purpose, and without 23 * warranty by the university of michigan of any kind, either express 24 * or implied, including without limitation the implied warranties of 25 * merchantability and fitness for a particular purpose. the regents 26 * of the university of michigan shall not be liable for any damages, 27 * including special, indirect, incidental, or consequential damages, 28 * with respect to any claim arising out or in connection with the use 29 * of the software, even if it has been or is hereafter advised of the 30 * possibility of such damages. 31 */ 32 33 #include <linux/module.h> 34 #include <linux/init.h> 35 #include <linux/mount.h> 36 #include <linux/namei.h> 37 #include <linux/bio.h> /* struct bio */ 38 #include <linux/prefetch.h> 39 40 #include "../pnfs.h" 41 #include "../nfs4session.h" 42 #include "../internal.h" 43 #include "blocklayout.h" 44 45 #define NFSDBG_FACILITY NFSDBG_PNFS_LD 46 47 MODULE_LICENSE("GPL"); 48 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); 49 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); 50 51 static bool is_hole(struct pnfs_block_extent *be) 52 { 53 switch (be->be_state) { 54 case PNFS_BLOCK_NONE_DATA: 55 return true; 56 case PNFS_BLOCK_INVALID_DATA: 57 return be->be_tag ? false : true; 58 default: 59 return false; 60 } 61 } 62 63 /* The data we are handed might be spread across several bios. We need 64 * to track when the last one is finished. 65 */ 66 struct parallel_io { 67 struct kref refcnt; 68 void (*pnfs_callback) (void *data); 69 void *data; 70 }; 71 72 static inline struct parallel_io *alloc_parallel(void *data) 73 { 74 struct parallel_io *rv; 75 76 rv = kmalloc_obj(*rv, GFP_NOFS); 77 if (rv) { 78 rv->data = data; 79 kref_init(&rv->refcnt); 80 } 81 return rv; 82 } 83 84 static inline void get_parallel(struct parallel_io *p) 85 { 86 kref_get(&p->refcnt); 87 } 88 89 static void destroy_parallel(struct kref *kref) 90 { 91 struct parallel_io *p = container_of(kref, struct parallel_io, refcnt); 92 93 dprintk("%s enter\n", __func__); 94 p->pnfs_callback(p->data); 95 kfree(p); 96 } 97 98 static inline void put_parallel(struct parallel_io *p) 99 { 100 kref_put(&p->refcnt, destroy_parallel); 101 } 102 103 static struct bio * 104 bl_submit_bio(struct bio *bio) 105 { 106 if (bio) { 107 get_parallel(bio->bi_private); 108 dprintk("%s submitting %s bio %u@%llu\n", __func__, 109 bio_op(bio) == READ ? "read" : "write", 110 bio->bi_iter.bi_size, 111 (unsigned long long)bio->bi_iter.bi_sector); 112 submit_bio(bio); 113 } 114 return NULL; 115 } 116 117 static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map) 118 { 119 return offset >= map->start && offset < map->start + map->len; 120 } 121 122 static struct bio * 123 do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect, 124 struct page *page, struct pnfs_block_dev_map *map, 125 struct pnfs_block_extent *be, bio_end_io_t end_io, 126 struct parallel_io *par, unsigned int offset, int *len) 127 { 128 struct pnfs_block_dev *dev = 129 container_of(be->be_device, struct pnfs_block_dev, node); 130 u64 disk_addr, end; 131 132 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, 133 npg, (__force u32)op, (unsigned long long)isect, offset, *len); 134 135 /* translate to device offset */ 136 isect += be->be_v_offset; 137 isect -= be->be_f_offset; 138 139 /* translate to physical disk offset */ 140 disk_addr = (u64)isect << SECTOR_SHIFT; 141 if (!offset_in_map(disk_addr, map)) { 142 if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map)) 143 return ERR_PTR(-EIO); 144 bio = bl_submit_bio(bio); 145 } 146 disk_addr += map->disk_offset; 147 disk_addr -= map->start; 148 149 /* limit length to what the device mapping allows */ 150 end = disk_addr + *len; 151 if (end >= map->disk_offset + map->len) 152 *len = map->disk_offset + map->len - disk_addr; 153 154 retry: 155 if (!bio) { 156 bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO); 157 bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT; 158 bio->bi_end_io = end_io; 159 bio->bi_private = par; 160 } 161 if (bio_add_page(bio, page, *len, offset) < *len) { 162 bio = bl_submit_bio(bio); 163 goto retry; 164 } 165 return bio; 166 } 167 168 static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw) 169 { 170 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); 171 size_t bytes_left = header->args.count; 172 sector_t isect, extent_length = 0; 173 struct pnfs_block_extent be; 174 175 isect = header->args.offset >> SECTOR_SHIFT; 176 bytes_left += header->args.offset - (isect << SECTOR_SHIFT); 177 178 while (bytes_left > 0) { 179 if (!ext_tree_lookup(bl, isect, &be, rw)) 180 return; 181 extent_length = be.be_length - (isect - be.be_f_offset); 182 nfs4_mark_deviceid_unavailable(be.be_device); 183 isect += extent_length; 184 if (bytes_left > extent_length << SECTOR_SHIFT) 185 bytes_left -= extent_length << SECTOR_SHIFT; 186 else 187 bytes_left = 0; 188 } 189 } 190 191 static void bl_end_io_read(struct bio *bio) 192 { 193 struct parallel_io *par = bio->bi_private; 194 195 if (bio->bi_status) { 196 struct nfs_pgio_header *header = par->data; 197 198 if (!header->pnfs_error) 199 header->pnfs_error = -EIO; 200 pnfs_set_lo_fail(header->lseg); 201 bl_mark_devices_unavailable(header, false); 202 } 203 204 bio_put(bio); 205 put_parallel(par); 206 } 207 208 static void bl_read_cleanup(struct work_struct *work) 209 { 210 struct rpc_task *task; 211 struct nfs_pgio_header *hdr; 212 dprintk("%s enter\n", __func__); 213 task = container_of(work, struct rpc_task, u.tk_work); 214 hdr = container_of(task, struct nfs_pgio_header, task); 215 pnfs_ld_read_done(hdr); 216 } 217 218 static void 219 bl_end_par_io_read(void *data) 220 { 221 struct nfs_pgio_header *hdr = data; 222 223 hdr->task.tk_status = hdr->pnfs_error; 224 INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup); 225 schedule_work(&hdr->task.u.tk_work); 226 } 227 228 static enum pnfs_try_status 229 bl_read_pagelist(struct nfs_pgio_header *header) 230 { 231 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); 232 struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; 233 struct bio *bio = NULL; 234 struct pnfs_block_extent be; 235 sector_t isect, extent_length = 0; 236 struct parallel_io *par; 237 loff_t f_offset = header->args.offset; 238 size_t bytes_left = header->args.count; 239 unsigned int pg_offset = header->args.pgbase, pg_len; 240 struct page **pages = header->args.pages; 241 int pg_index = header->args.pgbase >> PAGE_SHIFT; 242 const bool is_dio = (header->dreq != NULL); 243 struct blk_plug plug; 244 int i; 245 246 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, 247 header->page_array.npages, f_offset, 248 (unsigned int)header->args.count); 249 250 par = alloc_parallel(header); 251 if (!par) 252 return PNFS_NOT_ATTEMPTED; 253 par->pnfs_callback = bl_end_par_io_read; 254 255 blk_start_plug(&plug); 256 257 isect = (sector_t) (f_offset >> SECTOR_SHIFT); 258 /* Code assumes extents are page-aligned */ 259 for (i = pg_index; i < header->page_array.npages; i++) { 260 if (extent_length <= 0) { 261 /* We've used up the previous extent */ 262 bio = bl_submit_bio(bio); 263 264 /* Get the next one */ 265 if (!ext_tree_lookup(bl, isect, &be, false)) { 266 header->pnfs_error = -EIO; 267 goto out; 268 } 269 extent_length = be.be_length - (isect - be.be_f_offset); 270 } 271 272 if (is_dio) { 273 if (pg_offset + bytes_left > PAGE_SIZE) 274 pg_len = PAGE_SIZE - pg_offset; 275 else 276 pg_len = bytes_left; 277 } else { 278 BUG_ON(pg_offset != 0); 279 pg_len = PAGE_SIZE; 280 } 281 282 if (is_hole(&be)) { 283 bio = bl_submit_bio(bio); 284 /* Fill hole w/ zeroes w/o accessing device */ 285 dprintk("%s Zeroing page for hole\n", __func__); 286 zero_user_segment(pages[i], pg_offset, pg_len); 287 288 /* invalidate map */ 289 map.start = NFS4_MAX_UINT64; 290 } else { 291 bio = do_add_page_to_bio(bio, 292 header->page_array.npages - i, 293 REQ_OP_READ, 294 isect, pages[i], &map, &be, 295 bl_end_io_read, par, 296 pg_offset, &pg_len); 297 if (IS_ERR(bio)) { 298 header->pnfs_error = PTR_ERR(bio); 299 bio = NULL; 300 goto out; 301 } 302 } 303 isect += (pg_len >> SECTOR_SHIFT); 304 extent_length -= (pg_len >> SECTOR_SHIFT); 305 f_offset += pg_len; 306 bytes_left -= pg_len; 307 pg_offset = 0; 308 } 309 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { 310 header->res.eof = 1; 311 header->res.count = header->inode->i_size - header->args.offset; 312 } else { 313 header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; 314 } 315 out: 316 bl_submit_bio(bio); 317 blk_finish_plug(&plug); 318 put_parallel(par); 319 return PNFS_ATTEMPTED; 320 } 321 322 static void bl_end_io_write(struct bio *bio) 323 { 324 struct parallel_io *par = bio->bi_private; 325 struct nfs_pgio_header *header = par->data; 326 327 if (bio->bi_status) { 328 if (!header->pnfs_error) 329 header->pnfs_error = -EIO; 330 pnfs_set_lo_fail(header->lseg); 331 bl_mark_devices_unavailable(header, true); 332 } 333 bio_put(bio); 334 put_parallel(par); 335 } 336 337 /* Function scheduled for call during bl_end_par_io_write, 338 * it marks sectors as written and extends the commitlist. 339 */ 340 static void bl_write_cleanup(struct work_struct *work) 341 { 342 struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work); 343 struct nfs_pgio_header *hdr = 344 container_of(task, struct nfs_pgio_header, task); 345 346 dprintk("%s enter\n", __func__); 347 348 if (likely(!hdr->pnfs_error)) { 349 struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); 350 u64 start = hdr->args.offset & (loff_t)PAGE_MASK; 351 u64 end = (hdr->args.offset + hdr->args.count + 352 PAGE_SIZE - 1) & (loff_t)PAGE_MASK; 353 u64 lwb = hdr->args.offset + hdr->args.count; 354 355 ext_tree_mark_written(bl, start >> SECTOR_SHIFT, 356 (end - start) >> SECTOR_SHIFT, lwb); 357 } 358 359 pnfs_ld_write_done(hdr); 360 } 361 362 /* Called when last of bios associated with a bl_write_pagelist call finishes */ 363 static void bl_end_par_io_write(void *data) 364 { 365 struct nfs_pgio_header *hdr = data; 366 367 hdr->task.tk_status = hdr->pnfs_error; 368 hdr->verf.committed = NFS_FILE_SYNC; 369 INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup); 370 schedule_work(&hdr->task.u.tk_work); 371 } 372 373 static enum pnfs_try_status 374 bl_write_pagelist(struct nfs_pgio_header *header, int sync) 375 { 376 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); 377 struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; 378 struct bio *bio = NULL; 379 struct pnfs_block_extent be; 380 sector_t isect, extent_length = 0; 381 struct parallel_io *par = NULL; 382 loff_t offset = header->args.offset; 383 size_t count = header->args.count; 384 struct page **pages = header->args.pages; 385 int pg_index = header->args.pgbase >> PAGE_SHIFT; 386 unsigned int pg_len; 387 struct blk_plug plug; 388 int i; 389 390 dprintk("%s enter, %zu@%lld\n", __func__, count, offset); 391 392 /* At this point, header->page_aray is a (sequential) list of nfs_pages. 393 * We want to write each, and if there is an error set pnfs_error 394 * to have it redone using nfs. 395 */ 396 par = alloc_parallel(header); 397 if (!par) 398 return PNFS_NOT_ATTEMPTED; 399 par->pnfs_callback = bl_end_par_io_write; 400 401 blk_start_plug(&plug); 402 403 /* we always write out the whole page */ 404 offset = offset & (loff_t)PAGE_MASK; 405 isect = offset >> SECTOR_SHIFT; 406 407 for (i = pg_index; i < header->page_array.npages; i++) { 408 if (extent_length <= 0) { 409 /* We've used up the previous extent */ 410 bio = bl_submit_bio(bio); 411 /* Get the next one */ 412 if (!ext_tree_lookup(bl, isect, &be, true)) { 413 header->pnfs_error = -EINVAL; 414 goto out; 415 } 416 417 extent_length = be.be_length - (isect - be.be_f_offset); 418 } 419 420 pg_len = PAGE_SIZE; 421 bio = do_add_page_to_bio(bio, header->page_array.npages - i, 422 REQ_OP_WRITE, isect, pages[i], &map, 423 &be, bl_end_io_write, par, 0, &pg_len); 424 if (IS_ERR(bio)) { 425 header->pnfs_error = PTR_ERR(bio); 426 bio = NULL; 427 goto out; 428 } 429 430 offset += pg_len; 431 count -= pg_len; 432 isect += (pg_len >> SECTOR_SHIFT); 433 extent_length -= (pg_len >> SECTOR_SHIFT); 434 } 435 436 header->res.count = header->args.count; 437 out: 438 bl_submit_bio(bio); 439 blk_finish_plug(&plug); 440 put_parallel(par); 441 return PNFS_ATTEMPTED; 442 } 443 444 static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) 445 { 446 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 447 int err; 448 449 dprintk("%s enter\n", __func__); 450 451 err = ext_tree_remove(bl, true, 0, LLONG_MAX); 452 WARN_ON(err); 453 454 kfree_rcu(bl, bl_layout.plh_rcu); 455 } 456 457 static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode, 458 gfp_t gfp_flags, bool is_scsi_layout) 459 { 460 struct pnfs_block_layout *bl; 461 462 dprintk("%s enter\n", __func__); 463 bl = kzalloc_obj(*bl, gfp_flags); 464 if (!bl) 465 return NULL; 466 467 bl->bl_ext_rw = RB_ROOT; 468 bl->bl_ext_ro = RB_ROOT; 469 spin_lock_init(&bl->bl_ext_lock); 470 471 bl->bl_scsi_layout = is_scsi_layout; 472 return &bl->bl_layout; 473 } 474 475 static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, 476 gfp_t gfp_flags) 477 { 478 return __bl_alloc_layout_hdr(inode, gfp_flags, false); 479 } 480 481 static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode, 482 gfp_t gfp_flags) 483 { 484 return __bl_alloc_layout_hdr(inode, gfp_flags, true); 485 } 486 487 static void bl_free_lseg(struct pnfs_layout_segment *lseg) 488 { 489 dprintk("%s enter\n", __func__); 490 kfree(lseg); 491 } 492 493 /* Tracks info needed to ensure extents in layout obey constraints of spec */ 494 struct layout_verification { 495 u32 mode; /* R or RW */ 496 u64 start; /* Expected start of next non-COW extent */ 497 u64 inval; /* Start of INVAL coverage */ 498 u64 cowread; /* End of COW read coverage */ 499 }; 500 501 /* Verify the extent meets the layout requirements of the pnfs-block draft, 502 * section 2.3.1. 503 */ 504 static int verify_extent(struct pnfs_block_extent *be, 505 struct layout_verification *lv) 506 { 507 if (lv->mode == IOMODE_READ) { 508 if (be->be_state == PNFS_BLOCK_READWRITE_DATA || 509 be->be_state == PNFS_BLOCK_INVALID_DATA) 510 return -EIO; 511 if (be->be_f_offset != lv->start) 512 return -EIO; 513 lv->start += be->be_length; 514 return 0; 515 } 516 /* lv->mode == IOMODE_RW */ 517 if (be->be_state == PNFS_BLOCK_READWRITE_DATA) { 518 if (be->be_f_offset != lv->start) 519 return -EIO; 520 if (lv->cowread > lv->start) 521 return -EIO; 522 lv->start += be->be_length; 523 lv->inval = lv->start; 524 return 0; 525 } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 526 if (be->be_f_offset != lv->start) 527 return -EIO; 528 lv->start += be->be_length; 529 return 0; 530 } else if (be->be_state == PNFS_BLOCK_READ_DATA) { 531 if (be->be_f_offset > lv->start) 532 return -EIO; 533 if (be->be_f_offset < lv->inval) 534 return -EIO; 535 if (be->be_f_offset < lv->cowread) 536 return -EIO; 537 /* It looks like you might want to min this with lv->start, 538 * but you really don't. 539 */ 540 lv->inval = lv->inval + be->be_length; 541 lv->cowread = be->be_f_offset + be->be_length; 542 return 0; 543 } else 544 return -EIO; 545 } 546 547 static int decode_sector_number(__be32 **rp, sector_t *sp) 548 { 549 uint64_t s; 550 551 *rp = xdr_decode_hyper(*rp, &s); 552 if (s & 0x1ff) { 553 printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__); 554 return -1; 555 } 556 *sp = s >> SECTOR_SHIFT; 557 return 0; 558 } 559 560 static struct nfs4_deviceid_node * 561 bl_find_get_deviceid(struct nfs_server *server, 562 const struct nfs4_deviceid *id, const struct cred *cred, 563 gfp_t gfp_mask) 564 { 565 struct nfs4_deviceid_node *node; 566 int err = -ENODEV; 567 568 retry: 569 node = nfs4_find_get_deviceid(server, id, cred, gfp_mask); 570 if (!node) 571 return ERR_PTR(-ENODEV); 572 573 /* 574 * Devices that are marked unavailable are left in the cache with a 575 * timeout to avoid sending GETDEVINFO after every LAYOUTGET, or 576 * constantly attempting to register the device. Once marked as 577 * unavailable they must be deleted and never reused. 578 */ 579 if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) { 580 unsigned long end = jiffies; 581 unsigned long start = end - PNFS_DEVICE_RETRY_TIMEOUT; 582 583 if (!time_in_range(node->timestamp_unavailable, start, end)) { 584 /* Uncork subsequent GETDEVINFO operations for this device */ 585 nfs4_delete_deviceid(node->ld, node->nfs_client, id); 586 goto retry; 587 } 588 goto out_put; 589 } 590 591 if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node))) { 592 /* 593 * If we cannot register, treat this device as transient: 594 * Make a negative cache entry for the device 595 */ 596 nfs4_mark_deviceid_unavailable(node); 597 goto out_put; 598 } 599 600 return node; 601 602 out_put: 603 nfs4_put_deviceid_node(node); 604 return ERR_PTR(err); 605 } 606 607 static int 608 bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo, 609 struct layout_verification *lv, struct list_head *extents, 610 gfp_t gfp_mask) 611 { 612 struct pnfs_block_extent *be; 613 struct nfs4_deviceid id; 614 int error; 615 __be32 *p; 616 617 p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE); 618 if (!p) 619 return -EIO; 620 621 be = kzalloc_obj(*be, GFP_NOFS); 622 if (!be) 623 return -ENOMEM; 624 625 memcpy(&id, p, NFS4_DEVICEID4_SIZE); 626 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 627 628 be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id, 629 lo->plh_lc_cred, gfp_mask); 630 if (IS_ERR(be->be_device)) { 631 error = PTR_ERR(be->be_device); 632 goto out_free_be; 633 } 634 635 /* 636 * The next three values are read in as bytes, but stored in the 637 * extent structure in 512-byte granularity. 638 */ 639 error = -EIO; 640 if (decode_sector_number(&p, &be->be_f_offset) < 0) 641 goto out_put_deviceid; 642 if (decode_sector_number(&p, &be->be_length) < 0) 643 goto out_put_deviceid; 644 if (decode_sector_number(&p, &be->be_v_offset) < 0) 645 goto out_put_deviceid; 646 be->be_state = be32_to_cpup(p++); 647 648 error = verify_extent(be, lv); 649 if (error) { 650 dprintk("%s: extent verification failed\n", __func__); 651 goto out_put_deviceid; 652 } 653 654 list_add_tail(&be->be_list, extents); 655 return 0; 656 657 out_put_deviceid: 658 nfs4_put_deviceid_node(be->be_device); 659 out_free_be: 660 kfree(be); 661 return error; 662 } 663 664 static struct pnfs_layout_segment * 665 bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr, 666 gfp_t gfp_mask) 667 { 668 struct layout_verification lv = { 669 .mode = lgr->range.iomode, 670 .start = lgr->range.offset >> SECTOR_SHIFT, 671 .inval = lgr->range.offset >> SECTOR_SHIFT, 672 .cowread = lgr->range.offset >> SECTOR_SHIFT, 673 }; 674 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 675 struct pnfs_layout_segment *lseg; 676 struct xdr_buf buf; 677 struct xdr_stream xdr; 678 struct folio *scratch; 679 int status, i; 680 uint32_t count; 681 __be32 *p; 682 LIST_HEAD(extents); 683 684 dprintk("---> %s\n", __func__); 685 686 lseg = kzalloc_obj(*lseg, gfp_mask); 687 if (!lseg) 688 return ERR_PTR(-ENOMEM); 689 690 status = -ENOMEM; 691 scratch = folio_alloc(gfp_mask, 0); 692 if (!scratch) 693 goto out; 694 695 xdr_init_decode_pages(&xdr, &buf, 696 lgr->layoutp->pages, lgr->layoutp->len); 697 xdr_set_scratch_folio(&xdr, scratch); 698 699 status = -EIO; 700 p = xdr_inline_decode(&xdr, 4); 701 if (unlikely(!p)) 702 goto out_free_scratch; 703 704 count = be32_to_cpup(p++); 705 dprintk("%s: number of extents %d\n", __func__, count); 706 707 /* 708 * Decode individual extents, putting them in temporary staging area 709 * until whole layout is decoded to make error recovery easier. 710 */ 711 for (i = 0; i < count; i++) { 712 status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask); 713 if (status) 714 goto process_extents; 715 } 716 717 if (lgr->range.offset + lgr->range.length != 718 lv.start << SECTOR_SHIFT) { 719 dprintk("%s Final length mismatch\n", __func__); 720 status = -EIO; 721 goto process_extents; 722 } 723 724 if (lv.start < lv.cowread) { 725 dprintk("%s Final uncovered COW extent\n", __func__); 726 status = -EIO; 727 } 728 729 process_extents: 730 while (!list_empty(&extents)) { 731 struct pnfs_block_extent *be = 732 list_first_entry(&extents, struct pnfs_block_extent, 733 be_list); 734 list_del(&be->be_list); 735 736 if (!status) 737 status = ext_tree_insert(bl, be); 738 739 if (status) { 740 nfs4_put_deviceid_node(be->be_device); 741 kfree(be); 742 } 743 } 744 745 out_free_scratch: 746 folio_put(scratch); 747 out: 748 dprintk("%s returns %d\n", __func__, status); 749 switch (status) { 750 case -ENODEV: 751 /* Our extent block devices are unavailable */ 752 set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); 753 fallthrough; 754 case 0: 755 return lseg; 756 default: 757 kfree(lseg); 758 return ERR_PTR(status); 759 } 760 } 761 762 static void 763 bl_return_range(struct pnfs_layout_hdr *lo, 764 struct pnfs_layout_range *range) 765 { 766 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 767 sector_t offset = range->offset >> SECTOR_SHIFT, end; 768 769 if (range->offset % 8) { 770 dprintk("%s: offset %lld not block size aligned\n", 771 __func__, range->offset); 772 return; 773 } 774 775 if (range->length != NFS4_MAX_UINT64) { 776 if (range->length % 8) { 777 dprintk("%s: length %lld not block size aligned\n", 778 __func__, range->length); 779 return; 780 } 781 782 end = offset + (range->length >> SECTOR_SHIFT); 783 } else { 784 end = round_down(NFS4_MAX_UINT64, PAGE_SIZE); 785 } 786 787 ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end); 788 } 789 790 static int 791 bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg) 792 { 793 return ext_tree_prepare_commit(arg); 794 } 795 796 static void 797 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata) 798 { 799 ext_tree_mark_committed(&lcdata->args, lcdata->res.status); 800 } 801 802 static int 803 bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) 804 { 805 dprintk("%s enter\n", __func__); 806 807 if (server->pnfs_blksize == 0) { 808 dprintk("%s Server did not return blksize\n", __func__); 809 return -EINVAL; 810 } 811 if (server->pnfs_blksize > PAGE_SIZE) { 812 printk(KERN_ERR "%s: pNFS blksize %d not supported.\n", 813 __func__, server->pnfs_blksize); 814 return -EINVAL; 815 } 816 817 return 0; 818 } 819 820 static bool 821 is_aligned_req(struct nfs_pageio_descriptor *pgio, 822 struct nfs_page *req, unsigned int alignment, bool is_write) 823 { 824 /* 825 * Always accept buffered writes, higher layers take care of the 826 * right alignment. 827 */ 828 if (pgio->pg_dreq == NULL) 829 return true; 830 831 if (!IS_ALIGNED(req->wb_offset, alignment)) 832 return false; 833 834 if (IS_ALIGNED(req->wb_bytes, alignment)) 835 return true; 836 837 if (is_write && 838 (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) { 839 /* 840 * If the write goes up to the inode size, just write 841 * the full page. Data past the inode size is 842 * guaranteed to be zeroed by the higher level client 843 * code, and this behaviour is mandated by RFC 5663 844 * section 2.3.2. 845 */ 846 return true; 847 } 848 849 return false; 850 } 851 852 static void 853 bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 854 { 855 if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) { 856 nfs_pageio_reset_read_mds(pgio); 857 return; 858 } 859 860 pnfs_generic_pg_init_read(pgio, req); 861 862 if (pgio->pg_lseg && 863 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { 864 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); 865 pnfs_set_lo_fail(pgio->pg_lseg); 866 nfs_pageio_reset_read_mds(pgio); 867 } 868 } 869 870 /* 871 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number 872 * of bytes (maximum @req->wb_bytes) that can be coalesced. 873 */ 874 static size_t 875 bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 876 struct nfs_page *req) 877 { 878 if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) 879 return 0; 880 return pnfs_generic_pg_test(pgio, prev, req); 881 } 882 883 /* 884 * Return the number of contiguous bytes for a given inode 885 * starting at page frame idx. 886 */ 887 static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) 888 { 889 struct address_space *mapping = inode->i_mapping; 890 pgoff_t end; 891 892 /* Optimize common case that writes from 0 to end of file */ 893 end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 894 if (end != inode->i_mapping->nrpages) { 895 rcu_read_lock(); 896 end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX); 897 rcu_read_unlock(); 898 } 899 900 if (!end) 901 return i_size_read(inode) - (idx << PAGE_SHIFT); 902 else 903 return (end - idx) << PAGE_SHIFT; 904 } 905 906 static void 907 bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 908 { 909 u64 wb_size; 910 911 if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) { 912 nfs_pageio_reset_write_mds(pgio); 913 return; 914 } 915 916 if (pgio->pg_dreq == NULL) 917 wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index); 918 else 919 wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req)); 920 921 pnfs_generic_pg_init_write(pgio, req, wb_size); 922 923 if (pgio->pg_lseg && 924 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { 925 926 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); 927 pnfs_set_lo_fail(pgio->pg_lseg); 928 nfs_pageio_reset_write_mds(pgio); 929 } 930 } 931 932 /* 933 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number 934 * of bytes (maximum @req->wb_bytes) that can be coalesced. 935 */ 936 static size_t 937 bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 938 struct nfs_page *req) 939 { 940 if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) 941 return 0; 942 return pnfs_generic_pg_test(pgio, prev, req); 943 } 944 945 static const struct nfs_pageio_ops bl_pg_read_ops = { 946 .pg_init = bl_pg_init_read, 947 .pg_test = bl_pg_test_read, 948 .pg_doio = pnfs_generic_pg_readpages, 949 .pg_cleanup = pnfs_generic_pg_cleanup, 950 }; 951 952 static const struct nfs_pageio_ops bl_pg_write_ops = { 953 .pg_init = bl_pg_init_write, 954 .pg_test = bl_pg_test_write, 955 .pg_doio = pnfs_generic_pg_writepages, 956 .pg_cleanup = pnfs_generic_pg_cleanup, 957 }; 958 959 static struct pnfs_layoutdriver_type blocklayout_type = { 960 .id = LAYOUT_BLOCK_VOLUME, 961 .name = "LAYOUT_BLOCK_VOLUME", 962 .owner = THIS_MODULE, 963 .flags = PNFS_LAYOUTRET_ON_SETATTR | 964 PNFS_LAYOUTRET_ON_ERROR | 965 PNFS_READ_WHOLE_PAGE, 966 .read_pagelist = bl_read_pagelist, 967 .write_pagelist = bl_write_pagelist, 968 .alloc_layout_hdr = bl_alloc_layout_hdr, 969 .free_layout_hdr = bl_free_layout_hdr, 970 .alloc_lseg = bl_alloc_lseg, 971 .free_lseg = bl_free_lseg, 972 .return_range = bl_return_range, 973 .prepare_layoutcommit = bl_prepare_layoutcommit, 974 .cleanup_layoutcommit = bl_cleanup_layoutcommit, 975 .set_layoutdriver = bl_set_layoutdriver, 976 .alloc_deviceid_node = bl_alloc_deviceid_node, 977 .free_deviceid_node = bl_free_deviceid_node, 978 .pg_read_ops = &bl_pg_read_ops, 979 .pg_write_ops = &bl_pg_write_ops, 980 .sync = pnfs_generic_sync, 981 }; 982 983 static struct pnfs_layoutdriver_type scsilayout_type = { 984 .id = LAYOUT_SCSI, 985 .name = "LAYOUT_SCSI", 986 .owner = THIS_MODULE, 987 .flags = PNFS_LAYOUTRET_ON_SETATTR | 988 PNFS_LAYOUTRET_ON_ERROR | 989 PNFS_READ_WHOLE_PAGE, 990 .read_pagelist = bl_read_pagelist, 991 .write_pagelist = bl_write_pagelist, 992 .alloc_layout_hdr = sl_alloc_layout_hdr, 993 .free_layout_hdr = bl_free_layout_hdr, 994 .alloc_lseg = bl_alloc_lseg, 995 .free_lseg = bl_free_lseg, 996 .return_range = bl_return_range, 997 .prepare_layoutcommit = bl_prepare_layoutcommit, 998 .cleanup_layoutcommit = bl_cleanup_layoutcommit, 999 .set_layoutdriver = bl_set_layoutdriver, 1000 .alloc_deviceid_node = bl_alloc_deviceid_node, 1001 .free_deviceid_node = bl_free_deviceid_node, 1002 .pg_read_ops = &bl_pg_read_ops, 1003 .pg_write_ops = &bl_pg_write_ops, 1004 .sync = pnfs_generic_sync, 1005 }; 1006 1007 1008 static int __init nfs4blocklayout_init(void) 1009 { 1010 int ret; 1011 1012 dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); 1013 1014 ret = bl_init_pipefs(); 1015 if (ret) 1016 goto out; 1017 1018 ret = pnfs_register_layoutdriver(&blocklayout_type); 1019 if (ret) 1020 goto out_cleanup_pipe; 1021 1022 ret = pnfs_register_layoutdriver(&scsilayout_type); 1023 if (ret) 1024 goto out_unregister_block; 1025 return 0; 1026 1027 out_unregister_block: 1028 pnfs_unregister_layoutdriver(&blocklayout_type); 1029 out_cleanup_pipe: 1030 bl_cleanup_pipefs(); 1031 out: 1032 return ret; 1033 } 1034 1035 static void __exit nfs4blocklayout_exit(void) 1036 { 1037 dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", 1038 __func__); 1039 1040 pnfs_unregister_layoutdriver(&scsilayout_type); 1041 pnfs_unregister_layoutdriver(&blocklayout_type); 1042 bl_cleanup_pipefs(); 1043 } 1044 1045 MODULE_ALIAS("nfs-layouttype4-3"); 1046 MODULE_ALIAS("nfs-layouttype4-5"); 1047 1048 module_init(nfs4blocklayout_init); 1049 module_exit(nfs4blocklayout_exit); 1050