Lines matching "bl", "data", "offset" (fs/nfs/blocklayout/blocklayout.c)

54 	switch (be->be_state) {  in is_hole()
58 return be->be_tag ? false : true; in is_hole()
64 /* The data we are handed might be spread across several bios. We need to track when the last one is finished. */
69 void (*pnfs_callback) (void *data);
70 void *data; member
73 static inline struct parallel_io *alloc_parallel(void *data) in alloc_parallel() argument
79 rv->data = data; in alloc_parallel()
80 kref_init(&rv->refcnt); in alloc_parallel()
87 kref_get(&p->refcnt); in get_parallel()
95 p->pnfs_callback(p->data); in destroy_parallel()
101 kref_put(&p->refcnt, destroy_parallel); in put_parallel()
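The alloc/get/put trio above is the classic kref pattern: the structure starts with one reference, every bio submitted on behalf of the request takes another, and whichever put drops the count to zero runs the pNFS completion callback. bl_submit_bio() below takes that per-bio reference via get_parallel(bio->bi_private). A minimal userspace sketch of the same idea, using C11 atomics in place of kref (all names here are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct parallel_io_demo {
    	atomic_int refcnt;                  /* starts at 1, like kref_init() */
    	void (*pnfs_callback)(void *data);  /* runs when the last ref drops  */
    	void *data;
    };

    static struct parallel_io_demo *alloc_parallel_demo(void *data,
    						    void (*cb)(void *))
    {
    	struct parallel_io_demo *rv = malloc(sizeof(*rv));

    	if (!rv)
    		return NULL;
    	atomic_init(&rv->refcnt, 1);
    	rv->pnfs_callback = cb;
    	rv->data = data;
    	return rv;
    }

    static void get_parallel_demo(struct parallel_io_demo *p)
    {
    	atomic_fetch_add(&p->refcnt, 1);
    }

    static void put_parallel_demo(struct parallel_io_demo *p)
    {
    	/* the put that drops the count to zero runs the callback and frees */
    	if (atomic_fetch_sub(&p->refcnt, 1) == 1) {
    		p->pnfs_callback(p->data);
    		free(p);
    	}
    }

    static void done(void *data) { (void)data; /* completion point */ }

    int main(void)
    {
    	struct parallel_io_demo *par = alloc_parallel_demo(NULL, done);

    	get_parallel_demo(par);   /* one "bio" in flight              */
    	put_parallel_demo(par);   /* the "bio" completes              */
    	put_parallel_demo(par);   /* initial ref dropped: done() runs */
    	return 0;
    }

Because the initial reference is only dropped after the submission loop finishes, the completion callback cannot fire while bios are still being queued.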
108 get_parallel(bio->bi_private); in bl_submit_bio()
111 bio->bi_iter.bi_size, in bl_submit_bio()
112 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio()
118 static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map) in offset_in_map() argument
120 return offset >= map->start && offset < map->start + map->len; in offset_in_map()
127 struct parallel_io *par, unsigned int offset, int *len) in do_add_page_to_bio() argument
130 container_of(be->be_device, struct pnfs_block_dev, node); in do_add_page_to_bio()
133 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, in do_add_page_to_bio()
134 npg, (__force u32)op, (unsigned long long)isect, offset, *len); in do_add_page_to_bio()
136 /* translate to device offset */ in do_add_page_to_bio()
137 isect += be->be_v_offset; in do_add_page_to_bio()
138 isect -= be->be_f_offset; in do_add_page_to_bio()
140 /* translate to physical disk offset */ in do_add_page_to_bio()
143 if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map)) in do_add_page_to_bio()
144 return ERR_PTR(-EIO); in do_add_page_to_bio()
147 disk_addr += map->disk_offset; in do_add_page_to_bio()
148 disk_addr -= map->start; in do_add_page_to_bio()
152 if (end >= map->start + map->len) in do_add_page_to_bio()
153 *len = map->start + map->len - disk_addr; in do_add_page_to_bio()
157 bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO); in do_add_page_to_bio()
158 bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT; in do_add_page_to_bio()
159 bio->bi_end_io = end_io; in do_add_page_to_bio()
160 bio->bi_private = par; in do_add_page_to_bio()
162 if (bio_add_page(bio, page, *len, offset) < *len) { in do_add_page_to_bio()
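Before a page can be queued, do_add_page_to_bio() translates twice: file sector to volume sector (add be_v_offset, subtract be_f_offset), then volume offset to an offset on the concrete disk that dev->map() resolved (subtract map->start, add map->disk_offset), and finally clamps *len so the bio never runs past the end of that mapping. A self-contained sketch of just this arithmetic with made-up values; demo_translate() is illustrative, not the kernel helper, and it assumes the address falls inside the mapping, which the kernel checks with offset_in_map():

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Hypothetical flattened view of one extent and one device mapping. */
    struct demo_extent {
    	uint64_t be_f_offset;   /* first file sector covered       */
    	uint64_t be_v_offset;   /* matching volume sector          */
    };

    struct demo_map {
    	uint64_t start;         /* first volume byte this map covers */
    	uint64_t len;           /* bytes covered                     */
    	uint64_t disk_offset;   /* where 'start' lives on the disk   */
    };

    /* Translate a file sector to a disk byte offset, clamping *len. */
    static uint64_t demo_translate(uint64_t isect, const struct demo_extent *be,
    			       const struct demo_map *map, unsigned int *len)
    {
    	uint64_t disk_addr;

    	/* file sector -> volume sector */
    	isect += be->be_v_offset;
    	isect -= be->be_f_offset;

    	/* volume sector -> byte offset inside the mapped device */
    	disk_addr = isect << SECTOR_SHIFT;
    	disk_addr -= map->start;
    	disk_addr += map->disk_offset;

    	/* never let the I/O run past the end of this mapping */
    	if (disk_addr + *len > map->disk_offset + map->len)
    		*len = map->disk_offset + map->len - disk_addr;

    	return disk_addr;
    }

    int main(void)
    {
    	struct demo_extent be = { .be_f_offset = 0, .be_v_offset = 2048 };
    	struct demo_map map = { .start = 1 << 20, .len = 1 << 20,
    				.disk_offset = 0 };
    	unsigned int len = 2 << 20;

    	/* prints: disk offset 4096, len 1044480 (clamped) */
    	printf("disk offset %llu, len %u\n",
    	       (unsigned long long)demo_translate(8, &be, &map, &len), len);
    	return 0;
    }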
171 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_mark_devices_unavailable() local
172 size_t bytes_left = header->args.count; in bl_mark_devices_unavailable()
176 isect = header->args.offset >> SECTOR_SHIFT; in bl_mark_devices_unavailable()
177 bytes_left += header->args.offset - (isect << SECTOR_SHIFT); in bl_mark_devices_unavailable()
180 if (!ext_tree_lookup(bl, isect, &be, rw)) in bl_mark_devices_unavailable()
182 extent_length = be.be_length - (isect - be.be_f_offset); in bl_mark_devices_unavailable()
186 bytes_left -= extent_length << SECTOR_SHIFT; in bl_mark_devices_unavailable()
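Note the round-down at the top of bl_mark_devices_unavailable(): args.offset is truncated to a sector boundary and the bytes skipped by the truncation are added back to bytes_left, so the extent walk that follows still covers the whole byte range. The arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
    	uint64_t offset = 5000;       /* byte offset of the request */
    	uint64_t bytes_left = 3000;   /* byte count of the request  */

    	/* round the start down to a sector boundary ... */
    	uint64_t isect = offset >> SECTOR_SHIFT;
    	/* ... and widen the count by what the round-down skipped */
    	bytes_left += offset - (isect << SECTOR_SHIFT);

    	printf("start sector %llu, bytes to cover %llu\n",
    	       (unsigned long long)isect, (unsigned long long)bytes_left);
    	/* 5000 >> 9 = 9 (sector 9 starts at byte 4608), 3000 + 392 = 3392 */
    	return 0;
    }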
194 struct parallel_io *par = bio->bi_private; in bl_end_io_read()
196 if (bio->bi_status) { in bl_end_io_read()
197 struct nfs_pgio_header *header = par->data; in bl_end_io_read()
199 if (!header->pnfs_error) in bl_end_io_read()
200 header->pnfs_error = -EIO; in bl_end_io_read()
201 pnfs_set_lo_fail(header->lseg); in bl_end_io_read()
220 bl_end_par_io_read(void *data) in bl_end_par_io_read() argument
222 struct nfs_pgio_header *hdr = data; in bl_end_par_io_read()
224 hdr->task.tk_status = hdr->pnfs_error; in bl_end_par_io_read()
225 INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup); in bl_end_par_io_read()
226 schedule_work(&hdr->task.u.tk_work); in bl_end_par_io_read()
232 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_read_pagelist() local
238 loff_t f_offset = header->args.offset; in bl_read_pagelist()
239 size_t bytes_left = header->args.count; in bl_read_pagelist()
240 unsigned int pg_offset = header->args.pgbase, pg_len; in bl_read_pagelist()
241 struct page **pages = header->args.pages; in bl_read_pagelist()
242 int pg_index = header->args.pgbase >> PAGE_SHIFT; in bl_read_pagelist()
243 const bool is_dio = (header->dreq != NULL); in bl_read_pagelist()
247 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, in bl_read_pagelist()
248 header->page_array.npages, f_offset, in bl_read_pagelist()
249 (unsigned int)header->args.count); in bl_read_pagelist()
254 par->pnfs_callback = bl_end_par_io_read; in bl_read_pagelist()
259 /* Code assumes extents are page-aligned */ in bl_read_pagelist()
260 for (i = pg_index; i < header->page_array.npages; i++) { in bl_read_pagelist()
266 if (!ext_tree_lookup(bl, isect, &be, false)) { in bl_read_pagelist()
267 header->pnfs_error = -EIO; in bl_read_pagelist()
270 extent_length = be.be_length - (isect - be.be_f_offset); in bl_read_pagelist()
275 pg_len = PAGE_SIZE - pg_offset; in bl_read_pagelist()
293 header->page_array.npages - i, in bl_read_pagelist()
299 header->pnfs_error = PTR_ERR(bio); in bl_read_pagelist()
305 extent_length -= (pg_len >> SECTOR_SHIFT); in bl_read_pagelist()
307 bytes_left -= pg_len; in bl_read_pagelist()
310 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { in bl_read_pagelist()
311 header->res.eof = 1; in bl_read_pagelist()
312 header->res.count = header->inode->i_size - header->args.offset; in bl_read_pagelist()
314 header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; in bl_read_pagelist()
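The tail of bl_read_pagelist() turns the final sector cursor back into byte accounting: if the cursor reached or passed i_size, res.eof is set and res.count is trimmed to the bytes up to end-of-file; otherwise res.count is simply how far the cursor advanced past args.offset. A sketch of that bookkeeping (hypothetical helper):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Compute the bytes actually read and whether we hit end-of-file. */
    static uint64_t demo_read_result(uint64_t isect, uint64_t i_size,
    				 uint64_t offset, bool *eof)
    {
    	uint64_t end = isect << SECTOR_SHIFT;  /* byte the cursor reached */

    	if (end >= i_size) {
    		*eof = true;
    		return i_size - offset;        /* short read up to EOF */
    	}
    	*eof = false;
    	return end - offset;
    }

    int main(void)
    {
    	bool eof;
    	/* cursor at sector 20 (byte 10240), file is 9000 bytes long */
    	uint64_t count = demo_read_result(20, 9000, 4096, &eof);

    	printf("count %llu eof %d\n", (unsigned long long)count, eof);
    	/* prints: count 4904 eof 1 */
    	return 0;
    }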
325 struct parallel_io *par = bio->bi_private; in bl_end_io_write()
326 struct nfs_pgio_header *header = par->data; in bl_end_io_write()
328 if (bio->bi_status) { in bl_end_io_write()
329 if (!header->pnfs_error) in bl_end_io_write()
330 header->pnfs_error = -EIO; in bl_end_io_write()
331 pnfs_set_lo_fail(header->lseg); in bl_end_io_write()
349 if (likely(!hdr->pnfs_error)) { in bl_write_cleanup()
350 struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); in bl_write_cleanup() local
351 u64 start = hdr->args.offset & (loff_t)PAGE_MASK; in bl_write_cleanup()
352 u64 end = (hdr->args.offset + hdr->args.count + in bl_write_cleanup()
353 PAGE_SIZE - 1) & (loff_t)PAGE_MASK; in bl_write_cleanup()
354 u64 lwb = hdr->args.offset + hdr->args.count; in bl_write_cleanup()
356 ext_tree_mark_written(bl, start >> SECTOR_SHIFT, in bl_write_cleanup()
357 (end - start) >> SECTOR_SHIFT, lwb); in bl_write_cleanup()
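bl_write_cleanup() widens the committed range to whole pages before marking it in the extent tree: start rounds down with PAGE_MASK, end rounds up, and lwb (last write byte) keeps the exact unrounded end for the later layoutcommit. The rounding with concrete numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE    4096ULL
    #define PAGE_MASK    (~(PAGE_SIZE - 1))
    #define SECTOR_SHIFT 9

    int main(void)
    {
    	uint64_t offset = 5000, count = 3000;

    	uint64_t start = offset & PAGE_MASK;                         /* 4096 */
    	uint64_t end = (offset + count + PAGE_SIZE - 1) & PAGE_MASK; /* 8192 */
    	uint64_t lwb = offset + count;                               /* 8000 */

    	/* prints: mark sectors 8..16 written, lwb 8000 */
    	printf("mark sectors %llu..%llu written, lwb %llu\n",
    	       (unsigned long long)(start >> SECTOR_SHIFT),
    	       (unsigned long long)(end >> SECTOR_SHIFT),
    	       (unsigned long long)lwb);
    	return 0;
    }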
364 static void bl_end_par_io_write(void *data) in bl_end_par_io_write() argument
366 struct nfs_pgio_header *hdr = data; in bl_end_par_io_write()
368 hdr->task.tk_status = hdr->pnfs_error; in bl_end_par_io_write()
369 hdr->verf.committed = NFS_FILE_SYNC; in bl_end_par_io_write()
370 INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup); in bl_end_par_io_write()
371 schedule_work(&hdr->task.u.tk_work); in bl_end_par_io_write()
377 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_write_pagelist() local
383 loff_t offset = header->args.offset; in bl_write_pagelist() local
384 size_t count = header->args.count; in bl_write_pagelist()
385 struct page **pages = header->args.pages; in bl_write_pagelist()
386 int pg_index = header->args.pgbase >> PAGE_SHIFT; in bl_write_pagelist()
391 dprintk("%s enter, %zu@%lld\n", __func__, count, offset); in bl_write_pagelist()
393 /* At this point, header->page_array is a (sequential) list of nfs_pages. in bl_write_pagelist()
400 par->pnfs_callback = bl_end_par_io_write; in bl_write_pagelist()
405 offset = offset & (loff_t)PAGE_MASK; in bl_write_pagelist()
406 isect = offset >> SECTOR_SHIFT; in bl_write_pagelist()
408 for (i = pg_index; i < header->page_array.npages; i++) { in bl_write_pagelist()
413 if (!ext_tree_lookup(bl, isect, &be, true)) { in bl_write_pagelist()
414 header->pnfs_error = -EINVAL; in bl_write_pagelist()
418 extent_length = be.be_length - (isect - be.be_f_offset); in bl_write_pagelist()
422 bio = do_add_page_to_bio(bio, header->page_array.npages - i, in bl_write_pagelist()
426 header->pnfs_error = PTR_ERR(bio); in bl_write_pagelist()
431 offset += pg_len; in bl_write_pagelist()
432 count -= pg_len; in bl_write_pagelist()
434 extent_length -= (pg_len >> SECTOR_SHIFT); in bl_write_pagelist()
437 header->res.count = header->args.count; in bl_write_pagelist()
447 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_free_layout_hdr() local
452 err = ext_tree_remove(bl, true, 0, LLONG_MAX); in bl_free_layout_hdr()
455 kfree_rcu(bl, bl_layout.plh_rcu); in bl_free_layout_hdr()
461 struct pnfs_block_layout *bl; in __bl_alloc_layout_hdr() local
464 bl = kzalloc(sizeof(*bl), gfp_flags); in __bl_alloc_layout_hdr()
465 if (!bl) in __bl_alloc_layout_hdr()
468 bl->bl_ext_rw = RB_ROOT; in __bl_alloc_layout_hdr()
469 bl->bl_ext_ro = RB_ROOT; in __bl_alloc_layout_hdr()
470 spin_lock_init(&bl->bl_ext_lock); in __bl_alloc_layout_hdr()
472 bl->bl_scsi_layout = is_scsi_layout; in __bl_alloc_layout_hdr()
473 return &bl->bl_layout; in __bl_alloc_layout_hdr()
497 u64 start; /* Expected start of next non-COW extent */
502 /* Verify the extent meets the layout requirements of the pnfs-block draft, section 2.3.1. */
508 if (lv->mode == IOMODE_READ) { in verify_extent()
509 if (be->be_state == PNFS_BLOCK_READWRITE_DATA || in verify_extent()
510 be->be_state == PNFS_BLOCK_INVALID_DATA) in verify_extent()
511 return -EIO; in verify_extent()
512 if (be->be_f_offset != lv->start) in verify_extent()
513 return -EIO; in verify_extent()
514 lv->start += be->be_length; in verify_extent()
517 /* lv->mode == IOMODE_RW */ in verify_extent()
518 if (be->be_state == PNFS_BLOCK_READWRITE_DATA) { in verify_extent()
519 if (be->be_f_offset != lv->start) in verify_extent()
520 return -EIO; in verify_extent()
521 if (lv->cowread > lv->start) in verify_extent()
522 return -EIO; in verify_extent()
523 lv->start += be->be_length; in verify_extent()
524 lv->inval = lv->start; in verify_extent()
526 } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) { in verify_extent()
527 if (be->be_f_offset != lv->start) in verify_extent()
528 return -EIO; in verify_extent()
529 lv->start += be->be_length; in verify_extent()
531 } else if (be->be_state == PNFS_BLOCK_READ_DATA) { in verify_extent()
532 if (be->be_f_offset > lv->start) in verify_extent()
533 return -EIO; in verify_extent()
534 if (be->be_f_offset < lv->inval) in verify_extent()
535 return -EIO; in verify_extent()
536 if (be->be_f_offset < lv->cowread) in verify_extent()
537 return -EIO; in verify_extent()
538 /* It looks like you might want to min this with lv->start, but you really don't. */ in verify_extent()
541 lv->inval = lv->inval + be->be_length; in verify_extent()
542 lv->cowread = be->be_f_offset + be->be_length; in verify_extent()
545 return -EIO; in verify_extent()
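For an IOMODE_RW layout the verifier above threads three cursors through the extent list: start (expected start of the next non-COW extent), inval (end of the INVALID_DATA region seen so far), and cowread (end of the latest READ_DATA extent serving as a copy-on-write source). A condensed, standalone rendering of the read-write rules, on the assumption that it mirrors the checks excerpted above:

    #include <stdint.h>
    #include <stdio.h>

    enum demo_state { RW_DATA, INVALID_DATA, READ_DATA };

    struct demo_lv { uint64_t start, inval, cowread; };
    struct demo_ext { enum demo_state state; uint64_t f_offset, length; };

    /* Return 0 if the extent extends the layout consistently, -1 if not. */
    static int demo_verify_rw(struct demo_lv *lv, const struct demo_ext *be)
    {
    	switch (be->state) {
    	case RW_DATA:
    		if (be->f_offset != lv->start || lv->cowread > lv->start)
    			return -1;
    		lv->start += be->length;
    		lv->inval = lv->start;
    		return 0;
    	case INVALID_DATA:
    		if (be->f_offset != lv->start)
    			return -1;
    		lv->start += be->length;
    		return 0;
    	case READ_DATA:
    		if (be->f_offset > lv->start || be->f_offset < lv->inval ||
    		    be->f_offset < lv->cowread)
    			return -1;
    		lv->inval += be->length;
    		lv->cowread = be->f_offset + be->length;
    		return 0;
    	}
    	return -1;
    }

    int main(void)
    {
    	struct demo_lv lv = { 0, 0, 0 };
    	/* INVALID_DATA at 0..99, then the READ_DATA source it COWs from */
    	struct demo_ext e1 = { INVALID_DATA, 0, 100 };
    	struct demo_ext e2 = { READ_DATA, 0, 100 };

    	printf("%d %d\n", demo_verify_rw(&lv, &e1), demo_verify_rw(&lv, &e2));
    	/* prints: 0 0 */
    	return 0;
    }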
555 return -1; in decode_sector_number()
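Only the failure return of decode_sector_number() matches the search, but its job is visible from the callers in bl_alloc_extent(): extents arrive from the server as byte values and are stored in 512-byte sectors, so any value with a nonzero low 9 bits is rejected. A stand-in for just that convert-and-check step (the real helper also advances the XDR cursor):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Hypothetical stand-in: reject byte values that aren't whole sectors. */
    static int demo_decode_sector_number(uint64_t bytes, uint64_t *sector)
    {
    	if (bytes & 0x1ff)          /* low 9 bits set: not 512-aligned */
    		return -1;
    	*sector = bytes >> SECTOR_SHIFT;
    	return 0;
    }

    int main(void)
    {
    	uint64_t s;

    	printf("%d\n", demo_decode_sector_number(4096, &s));  /* 0, s = 8 */
    	printf("%d\n", demo_decode_sector_number(4097, &s));  /* -1 */
    	return 0;
    }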
567 int err = -ENODEV; in bl_find_get_deviceid()
572 return ERR_PTR(-ENODEV); in bl_find_get_deviceid()
580 if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) { in bl_find_get_deviceid()
582 unsigned long start = end - PNFS_DEVICE_RETRY_TIMEOUT; in bl_find_get_deviceid()
584 if (!time_in_range(node->timestamp_unavailable, start, end)) { in bl_find_get_deviceid()
586 nfs4_delete_deviceid(node->ld, node->nfs_client, id); in bl_find_get_deviceid()
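The unavailable-device test above is a sliding window: if timestamp_unavailable no longer falls within [now - PNFS_DEVICE_RETRY_TIMEOUT, now], the stale device ID is deleted and fetched again. time_in_range() stays correct across jiffies wraparound because it compares via signed subtraction; a userspace sketch of that trick:

    #include <stdio.h>
    #include <stdbool.h>

    /* Wraparound-safe comparisons in the style of the kernel's time_*() */
    static bool demo_time_after_eq(unsigned long a, unsigned long b)
    {
    	return (long)(a - b) >= 0;
    }

    static bool demo_time_in_range(unsigned long t, unsigned long start,
    			       unsigned long end)
    {
    	return demo_time_after_eq(t, start) && demo_time_after_eq(end, t);
    }

    int main(void)
    {
    	unsigned long now = 1000, timeout = 300;

    	printf("%d\n", demo_time_in_range(900, now - timeout, now)); /* 1 */
    	printf("%d\n", demo_time_in_range(600, now - timeout, now)); /* 0 */
    	return 0;
    }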
620 return -EIO; in bl_alloc_extent()
624 return -ENOMEM; in bl_alloc_extent()
629 be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id, in bl_alloc_extent()
630 lo->plh_lc_cred, gfp_mask); in bl_alloc_extent()
631 if (IS_ERR(be->be_device)) { in bl_alloc_extent()
632 error = PTR_ERR(be->be_device); in bl_alloc_extent()
638 * The next three values are read in as bytes, but stored in the extent structure in 512-byte granularity. in bl_alloc_extent()
640 error = -EIO; in bl_alloc_extent()
641 if (decode_sector_number(&p, &be->be_f_offset) < 0) in bl_alloc_extent()
643 if (decode_sector_number(&p, &be->be_length) < 0) in bl_alloc_extent()
645 if (decode_sector_number(&p, &be->be_v_offset) < 0) in bl_alloc_extent()
647 be->be_state = be32_to_cpup(p++); in bl_alloc_extent()
655 list_add_tail(&be->be_list, extents); in bl_alloc_extent()
659 nfs4_put_deviceid_node(be->be_device); in bl_alloc_extent()
670 .mode = lgr->range.iomode, in bl_alloc_lseg()
671 .start = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
672 .inval = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
673 .cowread = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
675 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_alloc_lseg() local
685 dprintk("---> %s\n", __func__); in bl_alloc_lseg()
689 return ERR_PTR(-ENOMEM); in bl_alloc_lseg()
691 status = -ENOMEM; in bl_alloc_lseg()
697 lgr->layoutp->pages, lgr->layoutp->len); in bl_alloc_lseg()
700 status = -EIO; in bl_alloc_lseg()
718 if (lgr->range.offset + lgr->range.length != in bl_alloc_lseg()
721 status = -EIO; in bl_alloc_lseg()
727 status = -EIO; in bl_alloc_lseg()
735 list_del(&be->be_list); in bl_alloc_lseg()
738 status = ext_tree_insert(bl, be); in bl_alloc_lseg()
741 nfs4_put_deviceid_node(be->be_device); in bl_alloc_lseg()
751 case -ENODEV: in bl_alloc_lseg()
753 set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); in bl_alloc_lseg()
767 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_return_range() local
768 sector_t offset = range->offset >> SECTOR_SHIFT, end; in bl_return_range() local
770 if (range->offset % 8) { in bl_return_range()
771 dprintk("%s: offset %lld not block size aligned\n", in bl_return_range()
772 __func__, range->offset); in bl_return_range()
776 if (range->length != NFS4_MAX_UINT64) { in bl_return_range()
777 if (range->length % 8) { in bl_return_range()
779 __func__, range->length); in bl_return_range()
783 end = offset + (range->length >> SECTOR_SHIFT); in bl_return_range()
788 ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end); in bl_return_range()
800 ext_tree_mark_committed(&lcdata->args, lcdata->res.status); in bl_cleanup_layoutcommit()
808 if (server->pnfs_blksize == 0) { in bl_set_layoutdriver()
810 return -EINVAL; in bl_set_layoutdriver()
812 if (server->pnfs_blksize > PAGE_SIZE) { in bl_set_layoutdriver()
814 __func__, server->pnfs_blksize); in bl_set_layoutdriver()
815 return -EINVAL; in bl_set_layoutdriver()
829 if (pgio->pg_dreq == NULL) in is_aligned_req()
832 if (!IS_ALIGNED(req->wb_offset, alignment)) in is_aligned_req()
835 if (IS_ALIGNED(req->wb_bytes, alignment)) in is_aligned_req()
839 (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) { in is_aligned_req()
842 * the full page. Data past the inode size is guaranteed to be zeroed by the higher level client code. in is_aligned_req()
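So is_aligned_req() admits a direct-I/O request only when its offset and length both sit on the layout's block boundary, with the one exception handled above: a write whose end lands exactly on the inode size may be expanded to the full page. A hypothetical userspace rendering of the gate:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Sketch of the alignment gate; all parameters are illustrative. */
    static bool demo_is_aligned_req(uint64_t offset, uint64_t bytes,
    				uint64_t alignment, bool is_write,
    				uint64_t i_size)
    {
    	if (!IS_ALIGNED(offset, alignment))
    		return false;
    	if (IS_ALIGNED(bytes, alignment))
    		return true;
    	/* unaligned tail is fine only for a write ending exactly at EOF */
    	return is_write && (offset + bytes == i_size);
    }

    int main(void)
    {
    	/* 512-byte blocks; a 700-byte write ending at the 8892-byte EOF */
    	printf("%d\n", demo_is_aligned_req(8192, 700, 512, true, 8892));  /* 1 */
    	printf("%d\n", demo_is_aligned_req(8192, 700, 512, false, 8892)); /* 0 */
    	return 0;
    }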
863 if (pgio->pg_lseg && in bl_pg_init_read()
864 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { in bl_pg_init_read()
865 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); in bl_pg_init_read()
866 pnfs_set_lo_fail(pgio->pg_lseg); in bl_pg_init_read()
873 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number of bytes (maximum @req->wb_bytes) that can be coalesced.
890 struct address_space *mapping = inode->i_mapping; in pnfs_num_cont_bytes()
895 if (end != inode->i_mapping->nrpages) { in pnfs_num_cont_bytes()
902 return i_size_read(inode) - (idx << PAGE_SHIFT); in pnfs_num_cont_bytes()
904 return (end - idx) << PAGE_SHIFT; in pnfs_num_cont_bytes()
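pnfs_num_cont_bytes() sizes a buffered write by probing the page cache from the request's page index: if no hole is found, the remaining bytes to i_size are returned; otherwise the byte span up to the first missing page. Just the two return-value computations, with hypothetical inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
    	uint64_t idx = 3;        /* page index the write starts at        */
    	uint64_t end = 0;        /* first missing page index; 0 = no hole */
    	uint64_t i_size = 26000; /* bytes in the file                     */

    	uint64_t bytes = end ? (end - idx) << PAGE_SHIFT
    			     : i_size - (idx << PAGE_SHIFT);

    	/* prints: 13712 contiguous bytes (26000 - 3 * 4096) */
    	printf("%llu contiguous bytes\n", (unsigned long long)bytes);
    	return 0;
    }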
917 if (pgio->pg_dreq == NULL) in bl_pg_init_write()
918 wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index); in bl_pg_init_write()
920 wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req)); in bl_pg_init_write()
924 if (pgio->pg_lseg && in bl_pg_init_write()
925 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { in bl_pg_init_write()
927 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); in bl_pg_init_write()
928 pnfs_set_lo_fail(pgio->pg_lseg); in bl_pg_init_write()
935 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number of bytes (maximum @req->wb_bytes) that can be coalesced.
1046 MODULE_ALIAS("nfs-layouttype4-3");
1047 MODULE_ALIAS("nfs-layouttype4-5");