/linux/mm/

  truncate.c
     289  loff_t lstart, loff_t lend)    in truncate_inode_pages_range() argument
     310  if (lend == -1)    in truncate_inode_pages_range()
     318  end = (lend + 1) >> PAGE_SHIFT;    in truncate_inode_pages_range()
     334  same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);    in truncate_inode_pages_range()
     337  same_folio = lend < folio_pos(folio) + folio_size(folio);    in truncate_inode_pages_range()
     338  if (!truncate_inode_partial_folio(folio, lstart, lend)) {    in truncate_inode_pages_range()
     349  folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,    in truncate_inode_pages_range()
     352  if (!truncate_inode_partial_folio(folio, lstart, lend))    in truncate_inode_pages_range()
     822  void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)    in truncate_pagecache_range() argument
     826  loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;    in truncate_pagecache_range()
     [all …]

  shmem.c
    1071  static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,    in shmem_undo_range() argument
    1077  pgoff_t end = (lend + 1) >> PAGE_SHIFT;    in shmem_undo_range()
    1086  if (lend == -1)    in shmem_undo_range()
    1125  same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);    in shmem_undo_range()
    1128  same_folio = lend < folio_pos(folio) + folio_size(folio);    in shmem_undo_range()
    1130  if (!truncate_inode_partial_folio(folio, lstart, lend)) {    in shmem_undo_range()
    1141  folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);    in shmem_undo_range()
    1144  if (!truncate_inode_partial_folio(folio, lstart, lend))    in shmem_undo_range()
    1197  } else if (truncate_inode_partial_folio(folio, lstart, lend)) {    in shmem_undo_range()
    1222  void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)    in shmem_truncate_range()
    [all …]

  filemap.c
     686  loff_t lstart, loff_t lend)    in filemap_write_and_wait_range() argument
     690  if (lend < lstart)    in filemap_write_and_wait_range()
     694  err = __filemap_fdatawrite_range(mapping, lstart, lend,    in filemap_write_and_wait_range()
     703  __filemap_fdatawait_range(mapping, lstart, lend);    in filemap_write_and_wait_range()
     788  int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)    in file_write_and_wait_range() argument
     793  if (lend < lstart)    in file_write_and_wait_range()
     797  err = __filemap_fdatawrite_range(mapping, lstart, lend,    in file_write_and_wait_range()
     801  __filemap_fdatawait_range(mapping, lstart, lend);    in file_write_and_wait_range()

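All of the /linux/mm/ matches above follow the same convention: lstart and lend are inclusive byte offsets bounding the page-cache range being operated on, lend == -1 means "through end of file", and the byte range is converted to page indices by shifting with PAGE_SHIFT. A minimal sketch of that index arithmetic, assuming 4 KiB pages and using a hypothetical helper name (not an actual kernel function), looks like this:

    #include <stdbool.h>

    /* Assumptions for this sketch: fixed 4 KiB pages and simplified typedefs;
     * the kernel derives these from the architecture and its own headers. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    typedef long long loff_t;
    typedef unsigned long pgoff_t;

    /* Hypothetical helper mirroring the index math visible in the
     * truncate.c and shmem.c matches above. */
    static void range_to_page_indices(loff_t lstart, loff_t lend,
                                      pgoff_t *start, pgoff_t *end,
                                      bool *same_folio)
    {
        /* First page that lies entirely inside [lstart, lend]. */
        *start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (lend == -1)
            /* lend == -1 means "to end of file": cover every later page index. */
            *end = (pgoff_t)-1;
        else
            /* One past the last page index the byte range touches. */
            *end = (lend + 1) >> PAGE_SHIFT;

        /* Both endpoints in one page: only a partial-folio truncate is needed. */
        *same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
    }

The filemap.c entries pass the same (lstart, lend) byte range straight through to the writeback and wait helpers, rejecting lend < lstart up front.
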
/linux/arch/xtensa/kernel/

  ptrace.c
      48  .lend = regs->lend,    in gpr_get()
      88  regs->lend = newregs.lend;    in gpr_set()
     314  tmp = regs->lend;    in ptrace_peekusr()

  signal.c
     154  COPY(lend);    in setup_sigcontext()
     192  COPY(lend);    in restore_sigcontext()
     215  && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )    in restore_sigcontext()

  asm-offsets.c
      41  DEFINE(PT_LEND, offsetof (struct pt_regs, lend));    in main()

  traps.c
     544  regs->lbeg, regs->lend, regs->lcount, regs->sar);    in show_regs()

  align.S
     444  rsr a4, lend # check if we reached LEND

  entry.S
     752  wsr a3, lend

/linux/lib/zstd/compress/

  zstd_compress_superblock.c
     439  const BYTE* const lend = seqStorePtr->lit;    in ZSTD_compressSubBlock_multi() local
     456  (unsigned)(lend-lp), (unsigned)(send-sstart));    in ZSTD_compressSubBlock_multi()
     471  assert(lp <= lend);    in ZSTD_compressSubBlock_multi()
     472  assert(litSize <= (size_t)(lend - lp));    in ZSTD_compressSubBlock_multi()
     473  litSize = (size_t)(lend - lp);    in ZSTD_compressSubBlock_multi()

/linux/sound/soc/sof/intel/

  telemetry.h
      29  u32 lend;    member

/linux/arch/xtensa/include/uapi/asm/

  ptrace.h
      51  __u32 lend;    member

/linux/arch/xtensa/include/asm/

  ptrace.h
      62  unsigned long lend; /* 36 */    member

/linux/block/

  bdev.c
     112  loff_t lstart, loff_t lend)    in truncate_bdev_range() argument
     125  truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);    in truncate_bdev_range()
     137  lend >> PAGE_SHIFT);    in truncate_bdev_range()
     222  int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)    in sync_blockdev_range() argument
     225  lstart, lend);    in sync_blockdev_range()

  blk.h
     595  loff_t lstart, loff_t lend);

/linux/drivers/md/

  dm-cache-policy-smq.c
     388  unsigned int lbegin, unsigned int lend)    in q_set_targets_subrange_() argument
     392  BUG_ON(lbegin > lend);    in q_set_targets_subrange_()
     393  BUG_ON(lend > q->nr_levels);    in q_set_targets_subrange_()
     394  nr_levels = lend - lbegin;    in q_set_targets_subrange_()
     398  for (level = lbegin; level < lend; level++)    in q_set_targets_subrange_()

/linux/fs/hugetlbfs/

  inode.c
     570  loff_t lend)    in remove_inode_hugepages() argument
     574  const pgoff_t end = lend >> PAGE_SHIFT;    in remove_inode_hugepages()
     578  bool truncate_op = (lend == LLONG_MAX);    in remove_inode_hugepages()

/linux/fs/gfs2/

  bmap.c
    1755  u64 lend;    in punch_hole() local
    1764  lend = end_offset >> bsize_shift;    in punch_hole()
    1766  if (lblock >= lend)    in punch_hole()
    1769  find_metapath(sdp, lend, &mp, ip->i_height);    in punch_hole()

/linux/Documentation/arch/powerpc/

  qe_firmware.rst
     172  do not lend themselves to simple inclusion into other code. Hence,

/linux/include/linux/

  blkdev.h
    1664  int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);

/linux/fs/ceph/

  inode.c
    2365  loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;    in fill_fscrypt_truncate() local
    2368  orig_pos, lend);    in fill_fscrypt_truncate()

  mds_client.c
     318  void *lend;    in parse_reply_info_lease() local
     338  lend = *p + struct_len;    in parse_reply_info_lease()
     354  *p = lend;    in parse_reply_info_lease()

/linux/LICENSES/deprecated/

  GFDL-1.1
     129  You may also lend copies, under the same conditions stated above, and

  GFDL-1.2
     154  You may also lend copies, under the same conditions stated above, and

/linux/Documentation/userspace-api/media/

  fdl-appendix.rst
     138  You may also lend copies, under the same conditions stated above, and