
Searched refs:lstart (Results 1 – 24 of 24) sorted by relevance

/linux/mm/
truncate.c
367 loff_t lstart, loff_t lend) in truncate_inode_pages_range() argument
387 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
412 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); in truncate_inode_pages_range()
413 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); in truncate_inode_pages_range()
416 if (!truncate_inode_partial_folio(folio, lstart, lend)) { in truncate_inode_pages_range()
430 if (!truncate_inode_partial_folio(folio, lstart, lend)) in truncate_inode_pages_range()
483 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) in truncate_inode_pages() argument
485 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
900 void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend) in truncate_pagecache_range() argument
903 loff_t unmap_start = round_up(lstart, PAGE_SIZE); in truncate_pagecache_range()
[all …]
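
The two index computations visible in the truncate_inode_pages_range() snippet above (rounding lstart up to the first whole page index, and the same_folio test) can be checked in isolation. A minimal userspace sketch, assuming 4 KiB pages in place of the kernel's PAGE_SIZE/PAGE_SHIFT; the example offsets are arbitrary:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
    unsigned long long lstart = 5000, lend = 7000;

    /* round lstart up: index of the first page starting at or after lstart */
    unsigned long long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /* do both ends of the range fall into the same page-sized folio? */
    int same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);

    printf("start index = %llu, same_folio = %d\n", start, same_folio);
    return 0;
}
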
filemap.c
676 loff_t lstart, loff_t lend) in filemap_write_and_wait_range() argument
680 if (lend < lstart) in filemap_write_and_wait_range()
684 err = filemap_fdatawrite_range(mapping, lstart, lend); in filemap_write_and_wait_range()
692 __filemap_fdatawait_range(mapping, lstart, lend); in filemap_write_and_wait_range()
777 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) in file_write_and_wait_range() argument
782 if (lend < lstart) in file_write_and_wait_range()
786 err = filemap_fdatawrite_range(mapping, lstart, lend); in file_write_and_wait_range()
789 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
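
filemap_write_and_wait_range() and file_write_and_wait_range() are the usual building blocks of an ->fsync implementation: write back the dirty pages in [lstart, lend], then wait for the writeback to finish. A hedged sketch of such a caller; foofs_fsync() is a hypothetical name and the metadata step is only indicated by a comment:

/* Sketch only: how an ->fsync handler typically drives the helper shown
 * above. foofs_fsync() is made up; only filemap_write_and_wait_range()
 * itself comes from the listing. */
#include <linux/fs.h>
#include <linux/pagemap.h>

static int foofs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
    struct inode *inode = file_inode(file);
    int err;

    /* write back dirty pages in [start, end] and wait for completion */
    err = filemap_write_and_wait_range(inode->i_mapping, start, end);
    if (err)
        return err;

    /* ...a real filesystem would commit its metadata here... */
    return 0;
}
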
shmem.c
1096 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, in shmem_undo_range() argument
1101 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_undo_range()
1145 * even when [lstart, lend] covers only a part of the folio. in shmem_undo_range()
1150 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); in shmem_undo_range()
1151 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); in shmem_undo_range()
1155 if (!truncate_inode_partial_folio(folio, lstart, lend)) { in shmem_undo_range()
1169 if (!truncate_inode_partial_folio(folio, lstart, lend)) in shmem_undo_range()
1222 } else if (truncate_inode_partial_folio(folio, lstart, lend)) { in shmem_undo_range()
1247 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range()
1249 shmem_undo_range(inode, lstart, lend, false); in shmem_truncate_range()
5788 shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) shmem_truncate_range() argument
[all …]
/linux/fs/erofs/
zmap.c
518 erofs_off_t l, r, mid, pa, la, lstart; in z_erofs_map_blocks_ext() local
531 lstart = 0; in z_erofs_map_blocks_ext()
533 lstart = round_down(map->m_la, 1 << vi->z_lclusterbits); in z_erofs_map_blocks_ext()
534 pos += (lstart >> vi->z_lclusterbits) * recsz; in z_erofs_map_blocks_ext()
538 for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) { in z_erofs_map_blocks_ext()
551 last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits)); in z_erofs_map_blocks_ext()
552 lend = min(lstart, lend); in z_erofs_map_blocks_ext()
553 lstart -= 1 << vi->z_lclusterbits; in z_erofs_map_blocks_ext()
555 lstart = lend; in z_erofs_map_blocks_ext()
580 lstart = la; in z_erofs_map_blocks_ext()
[all …]
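
The lcluster arithmetic in the z_erofs_map_blocks_ext() snippet (round m_la down to an lcluster boundary, then turn that boundary into a byte offset of its on-disk extent record) is plain bit math. A standalone sketch with assumed values for z_lclusterbits and recsz, which in the real code come from the inode:

#include <stdio.h>
#include <stdint.h>

/* assumptions for illustration only: 16 KiB lclusters, 8-byte extent records */
#define LCLUSTERBITS 14
#define RECSZ        8

int main(void)
{
    uint64_t m_la = 100000;                                 /* queried logical address */
    uint64_t lstart = m_la & ~((1ULL << LCLUSTERBITS) - 1); /* round_down() to lcluster */
    uint64_t pos = (lstart >> LCLUSTERBITS) * RECSZ;        /* byte offset of its record */

    printf("lstart=%llu record offset=%llu\n",
           (unsigned long long)lstart, (unsigned long long)pos);
    return 0;
}
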
/linux/fs/f2fs/
segment.c
966 struct block_device *bdev, block_t lstart, in __create_discard_cmd() argument
980 dc->di.lstart = lstart; in __create_discard_cmd()
1012 if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) { in f2fs_check_discard_tree()
1015 cur_dc->di.lstart, cur_dc->di.len, in f2fs_check_discard_tree()
1016 next_dc->di.lstart, next_dc->di.len); in f2fs_check_discard_tree()
1035 if (blkaddr < dc->di.lstart) in __lookup_discard_cmd()
1037 else if (blkaddr >= dc->di.lstart + dc->di.len) in __lookup_discard_cmd()
1068 if (blkaddr < dc->di.lstart) in __lookup_discard_cmd_ret()
1070 else if (blkaddr >= dc->di.lstart + dc->di.len) in __lookup_discard_cmd_ret()
1081 if (parent && blkaddr > dc->di.lstart) in __lookup_discard_cmd_ret()
[all …]
f2fs.h
437 block_t lstart; /* logical start address */ member
1001 return (back->lstart + back->len == front->lstart) && in __is_discard_mergeable()
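
The f2fs check above treats two discard entries as mergeable when the earlier one ends exactly where the later one begins; the trailing && in the snippet indicates the real helper applies further limits as well. A reduced standalone sketch of just the adjacency test, with a cut-down discard_info struct:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* Reduced stand-in for f2fs's discard info: only the fields the check uses. */
struct discard_info {
    uint32_t lstart;    /* logical start address */
    uint32_t len;       /* number of blocks */
};

/* back can be merged with front iff back ends exactly where front begins */
static bool is_mergeable(const struct discard_info *back,
                         const struct discard_info *front)
{
    return back->lstart + back->len == front->lstart;
}

int main(void)
{
    struct discard_info a = { .lstart = 100, .len = 8 };
    struct discard_info b = { .lstart = 108, .len = 4 };

    printf("mergeable: %d\n", is_mergeable(&a, &b));    /* prints 1 */
    return 0;
}
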
/linux/drivers/rapidio/devices/
tsi721.c
1110 static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, in tsi721_rio_map_inb_mem() argument
1117 bool direct = (lstart == rstart); in tsi721_rio_map_inb_mem()
1132 ibw_start = lstart & ~(ibw_size - 1); in tsi721_rio_map_inb_mem()
1136 rstart, &lstart, size, ibw_start); in tsi721_rio_map_inb_mem()
1138 while ((lstart + size) > (ibw_start + ibw_size)) { in tsi721_rio_map_inb_mem()
1140 ibw_start = lstart & ~(ibw_size - 1); in tsi721_rio_map_inb_mem()
1155 rstart, &lstart, size); in tsi721_rio_map_inb_mem()
1158 ((u64)lstart & (size - 1)) || (rstart & (size - 1))) in tsi721_rio_map_inb_mem()
1164 loc_start = lstart; in tsi721_rio_map_inb_mem()
1196 map->lstart = lstart; in tsi721_rio_map_inb_mem()
[all …]
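
The tsi721 snippet aligns the requested local address down to a power-of-two inbound-window boundary and keeps doubling the window until the whole [lstart, lstart + size) range fits. A standalone sketch of that align-and-grow step with made-up addresses; the real driver additionally caps the window size and validates alignment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* illustrative values only */
    uint64_t lstart = 0x12345000, size = 0x40000;
    uint64_t ibw_size = 0x10000;                    /* minimum window, power of two */
    uint64_t ibw_start = lstart & ~(ibw_size - 1);  /* align window start down */

    /* grow the window until [lstart, lstart + size) fits inside it */
    while (lstart + size > ibw_start + ibw_size) {
        ibw_size *= 2;
        ibw_start = lstart & ~(ibw_size - 1);
    }

    printf("window: start=0x%llx size=0x%llx\n",
           (unsigned long long)ibw_start, (unsigned long long)ibw_size);
    return 0;
}
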
tsi721.h
840 dma_addr_t lstart; member
846 dma_addr_t lstart; member
/linux/arch/powerpc/sysdev/
fsl_rio.c
307 static int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, in fsl_map_inb_mem() argument
324 if (lstart & (base_size - 1)) in fsl_map_inb_mem()
350 out_be32(&priv->inb_atmu_regs[i].riwtar, lstart >> RIWTAR_TRAD_VAL_SHIFT); in fsl_map_inb_mem()
358 static void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) in fsl_unmap_inb_mem() argument
366 base_start_shift = lstart >> RIWTAR_TRAD_VAL_SHIFT; in fsl_unmap_inb_mem()
/linux/fs/udf/
udf_i.h
15 loff_t lstart; member
inode.c
74 if (iinfo->cached_extent.lstart != -1) { in __udf_clear_extent_cache()
76 iinfo->cached_extent.lstart = -1; in __udf_clear_extent_cache()
98 if ((iinfo->cached_extent.lstart <= bcount) && in udf_read_extent_cache()
99 (iinfo->cached_extent.lstart != -1)) { in udf_read_extent_cache()
101 *lbcount = iinfo->cached_extent.lstart; in udf_read_extent_cache()
124 iinfo->cached_extent.lstart = estart; in udf_update_extent_cache()
super.c
166 ei->cached_extent.lstart = -1; in udf_alloc_inode()
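
UDF's per-inode extent cache uses lstart == -1 as its "empty" sentinel: udf_alloc_inode() and __udf_clear_extent_cache() store -1, and udf_read_extent_cache() only trusts the entry when lstart is valid and does not lie past the offset being resolved. A reduced standalone sketch of that convention (struct and helper names here are mine):

#include <stdio.h>

/* Cut-down stand-in for the cached extent: lstart == -1 means "no entry". */
struct extent_cache {
    long long lstart;   /* file offset the cached extent starts at */
};

static void cache_clear(struct extent_cache *c)
{
    c->lstart = -1;
}

static int cache_usable(const struct extent_cache *c, long long bcount)
{
    /* valid, and starting at or before the offset being looked up */
    return c->lstart != -1 && c->lstart <= bcount;
}

int main(void)
{
    struct extent_cache c;

    cache_clear(&c);
    printf("usable after clear: %d\n", cache_usable(&c, 4096));   /* 0 */
    c.lstart = 0;
    printf("usable after fill:  %d\n", cache_usable(&c, 4096));   /* 1 */
    return 0;
}
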
/linux/block/
bdev.c
112 loff_t lstart, loff_t lend) in truncate_bdev_range() argument
125 truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); in truncate_bdev_range()
136 lstart >> PAGE_SHIFT, in truncate_bdev_range()
281 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) in sync_blockdev_range() argument
284 lstart, lend); in sync_blockdev_range()
/linux/lib/zstd/compress/
zstd_compress_superblock.c
494 const BYTE* const lstart = seqStorePtr->litStart; in ZSTD_compressSubBlock_multi() local
496 const BYTE* lp = lstart; in ZSTD_compressSubBlock_multi()
497 size_t const nbLiterals = (size_t)(lend - lstart); in ZSTD_compressSubBlock_multi()
512 (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart)); in ZSTD_compressSubBlock_multi()
/linux/fs/jfs/
jfs_xtree.h
100 extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
/linux/include/linux/
rio.h
420 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
422 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
pagemap.h
43 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
54 bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
56 loff_t lstart, loff_t lend);
rio_drv.h
367 extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart);
mm.h
3728 void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
3729 void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
/linux/fs/hugetlbfs/
inode.c
577 static void remove_inode_hugepages(struct inode *inode, loff_t lstart, in remove_inode_hugepages() argument
589 next = lstart >> PAGE_SHIFT; in remove_inode_hugepages()
614 lstart >> huge_page_shift(h), in remove_inode_hugepages()
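
remove_inode_hugepages() indexes the same byte offset twice: once in base-page units (lstart >> PAGE_SHIFT) and once in huge-page units (lstart >> huge_page_shift(h)). A small sketch of the difference, assuming 4 KiB base pages and 2 MiB huge pages; the actual huge-page size depends on the hstate:

#include <stdio.h>

#define PAGE_SHIFT  12      /* assumption: 4 KiB base pages */
#define HPAGE_SHIFT 21      /* assumption: 2 MiB huge pages */

int main(void)
{
    unsigned long long lstart = 6 << 20;    /* 6 MiB into the file */

    /* the same byte offset maps to different indices at the two granularities */
    printf("base-page index: %llu\n", lstart >> PAGE_SHIFT);    /* 1536 */
    printf("huge-page index: %llu\n", lstart >> HPAGE_SHIFT);   /* 3 */
    return 0;
}
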
/linux/drivers/rapidio/
rio.c
709 void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) in rio_unmap_inb_region() argument
715 mport->ops->unmap_inb(mport, lstart); in rio_unmap_inb_region()
/linux/fs/ext4/
inode.c
4201 loff_t lstart, loff_t length) in ext4_zero_partial_blocks() argument
4207 loff_t byte_end = (lstart + length - 1); in ext4_zero_partial_blocks()
4210 partial_start = lstart & (sb->s_blocksize - 1); in ext4_zero_partial_blocks()
4213 start = lstart >> sb->s_blocksize_bits; in ext4_zero_partial_blocks()
4220 lstart, length); in ext4_zero_partial_blocks()
4226 lstart, sb->s_blocksize); in ext4_zero_partial_blocks()
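
ext4_zero_partial_blocks() needs to know whether the range to be zeroed starts or ends inside a block, so it masks the first and last byte offsets with the block size and shifts them down to block numbers. A standalone sketch of that arithmetic, assuming 4 KiB blocks; names beyond partial_start are mine:

#include <stdio.h>

int main(void)
{
    /* assumption for illustration: 4 KiB filesystem blocks */
    unsigned int blocksize_bits = 12;
    unsigned long long blocksize = 1ULL << blocksize_bits;

    unsigned long long lstart = 5000, length = 10000;
    unsigned long long byte_end = lstart + length - 1;

    unsigned long long partial_start = lstart & (blocksize - 1);   /* offset into first block */
    unsigned long long partial_end = byte_end & (blocksize - 1);   /* offset into last block */
    unsigned long long start_blk = lstart >> blocksize_bits;
    unsigned long long end_blk = byte_end >> blocksize_bits;

    printf("blocks %llu..%llu, partial head at %llu, partial tail at %llu\n",
           start_blk, end_blk, partial_start, partial_end);
    return 0;
}
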
/linux/fs/nfs/
write.c
1966 loff_t lstart, loff_t lend) in nfs_filemap_write_and_wait_range() argument
1970 ret = filemap_write_and_wait_range(mapping, lstart, lend); in nfs_filemap_write_and_wait_range()
internal.h
640 loff_t lstart, loff_t lend);