Lines matching +defs:end +defs:file (mm/filemap.c)

9  * This file handles the generic file mmap semantics used by
28 #include <linux/file.h>
396 * @end: offset in bytes where the range ends (inclusive)
400 * within the byte offsets <start, end> inclusive.
410 loff_t end, int sync_mode)
416 .range_end = end,
435 loff_t end)
437 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
445 * @end: last (inclusive) index for writeback
453 loff_t end)
455 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);
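The two wrappers above differ only in sync mode: WB_SYNC_ALL is for data integrity, WB_SYNC_NONE is best-effort flushing. A minimal sketch of choosing between them; kick_writeback() is a hypothetical helper, not a kernel API, and either path only starts I/O without waiting for it:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: start writeback for a whole file. */
    static int kick_writeback(struct address_space *mapping, bool for_integrity)
    {
            if (for_integrity)
                    return filemap_fdatawrite(mapping);     /* WB_SYNC_ALL */
            return filemap_flush(mapping);                  /* WB_SYNC_NONE */
    }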
521 pgoff_t end = end_byte >> PAGE_SHIFT;
527 while (index <= end) {
530 nr_folios = filemap_get_folios_tag(mapping, &index, end,
594 * @file: file pointing to address space structure to wait for
598 * Walk the list of under-writeback pages of the address space that file
600 * status of the address space vs. the file->f_wb_err cursor and return it.
602 * Since the error status of the file is advanced by this function,
606 * Return: error status of the address space vs. the file->f_wb_err cursor.
608 int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
610 struct address_space *mapping = file->f_mapping;
613 return file_check_and_advance_wb_err(file);
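file_fdatawait_range() pairs naturally with a preceding fdatawrite. A sketch of the classic two-phase flush; sketch_flush_range() is hypothetical:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical: start I/O on a byte range, then wait and harvest
     * any writeback error through the file's f_wb_err cursor. */
    static int sketch_flush_range(struct file *file, loff_t start, loff_t end)
    {
            int err = filemap_fdatawrite_range(file->f_mapping, start, end);

            if (err)
                    return err;
            return file_fdatawait_range(file, start, end);
    }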
670 * filemap_write_and_wait_range - write out & wait on a file range
675 * Write out and wait upon file offsets lstart->lend, inclusive.
678 * that this function can be used to write to the very end-of-file (end = -1).
720 * @file: struct file on which the error is being reported
724 * since the file was opened if there haven't been any).
726 * Grab the wb_err from the mapping. If it matches what we have in the file,
727 * then just quickly return 0. The file is all caught up.
737 * the latest value swapped in for this file descriptor.
741 int file_check_and_advance_wb_err(struct file *file)
744 errseq_t old = READ_ONCE(file->f_wb_err);
745 struct address_space *mapping = file->f_mapping;
750 spin_lock(&file->f_lock);
751 old = file->f_wb_err;
753 &file->f_wb_err);
754 trace_file_check_and_advance_wb_err(file, old);
755 spin_unlock(&file->f_lock);
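The errseq_t machinery visible here has a producer side too: writeback paths record failures with mapping_set_error(), and each struct file then observes a given error exactly once. A sketch of the producer; on_writeback_error() is hypothetical:

    #include <linux/pagemap.h>

    /* Hypothetical: record a writeback failure against the mapping so
     * every not-yet-advanced f_wb_err cursor will report it once. */
    static void on_writeback_error(struct address_space *mapping, int err)
    {
            mapping_set_error(mapping, err);        /* bumps mapping->wb_err */
    }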
770 * file_write_and_wait_range - write out & wait on a file range
771 * @file: file pointing to address_space with pages
775 * Write out and wait upon file offsets lstart->lend, inclusive.
778 * that this function can be used to write to the very end-of-file (end = -1).
785 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
788 struct address_space *mapping = file->f_mapping;
800 err2 = file_check_and_advance_wb_err(file);
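Because file_write_and_wait_range() already advances the per-file error cursor, a data-only ->fsync can be little more than a call to it. A sketch; sketch_fsync() is hypothetical and ignores inode metadata, which a real fsync must also persist:

    #include <linux/fs.h>

    /* Hypothetical data-only fsync: a second fsync() on the same fd
     * returns 0 unless a *new* writeback error has been recorded. */
    static int sketch_fsync(struct file *file, loff_t start, loff_t end,
                            int datasync)
    {
            return file_write_and_wait_range(file, start, end);
    }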
1646 * end of every folio writeback.
1846 * Because the refcount temporarily acquired here may end up being the
2069 * @end: The final page index (inclusive).
2086 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2092 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2118 * @end: The final page index (inclusive).
2135 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2141 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2151 /* Omit large folio which extends beyond the end */
2152 if (base + nr - 1 > end)
2167 /* Omit order>0 value which extends beyond the end */
2168 if (base + nr - 1 > end)
2192 * @end: The final page index (inclusive)
2196 * index @start and up to index @end (inclusive). The folios are returned
2203 pgoff_t end, struct folio_batch *fbatch)
2205 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
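filemap_get_folios() advances *start itself, so callers simply loop on the batch. A sketch of the usual iteration pattern; walk_cached_folios() is hypothetical:

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Hypothetical: visit every cached folio in [start, end]. */
    static void walk_cached_folios(struct address_space *mapping,
                                   pgoff_t start, pgoff_t end)
    {
            struct folio_batch fbatch;

            folio_batch_init(&fbatch);
            while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                    unsigned int i;

                    for (i = 0; i < folio_batch_count(&fbatch); i++) {
                            struct folio *folio = fbatch.folios[i];

                            /* a reference is held across this body */
                            pr_debug("folio at index %lu\n", folio->index);
                    }
                    folio_batch_release(&fbatch);   /* drop the references */
                    cond_resched();
            }
    }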
2213 * @end: The final page index (inclusive)
2225 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2233 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2244 /* If we landed in the middle of a THP, continue at its end. */
2285 * @end: The final page index (inclusive)
2290 * @start. The final folio may extend beyond @end; if it does, it will
2291 * contain @end. The folios have ascending indices. There may be gaps
2301 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2307 while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2322 * We come here when there is no page beyond @end. We take care to not
2327 if (end == (pgoff_t)-1)
2330 *start = end + 1;
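The same batched pattern works for tag-filtered walks; note above how the helper avoids wrapping *start when end == (pgoff_t)-1. A sketch counting dirty folios; count_dirty_folios() is hypothetical:

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    /* Hypothetical: count folios tagged dirty in [start, end]. */
    static unsigned long count_dirty_folios(struct address_space *mapping,
                                            pgoff_t start, pgoff_t end)
    {
            struct folio_batch fbatch;
            unsigned long nr = 0;

            folio_batch_init(&fbatch);
            while (filemap_get_folios_tag(mapping, &start, end,
                                          PAGECACHE_TAG_DIRTY, &fbatch)) {
                    nr += folio_batch_count(&fbatch);
                    folio_batch_release(&fbatch);
            }
            return nr;
    }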
2362 * the file. No exceptional entries will be returned. If @index is in
2403 static int filemap_read_folio(struct file *file, filler_t filler,
2413 error = filler(file, folio);
2424 if (file)
2425 shrink_readahead_size_eio(&file->f_ra);
2473 * This is where we usually end up waiting for a
2562 static int filemap_readahead(struct kiocb *iocb, struct file *file,
2566 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2579 struct file *filp = iocb->ki_filp;
2587 /* "last_index" is the index of the page beyond the end of the read */
2678 struct file *filp = iocb->ki_filp;
2722 * another truncate extends the file - this is desired though).
2794 loff_t end = pos + count - 1;
2797 if (filemap_range_needs_writeback(mapping, pos, end))
2802 return filemap_write_and_wait_range(mapping, pos, end);
2807 loff_t pos, loff_t end, bool nowait)
2813 if (filemap_range_has_page(mapping, pos, end))
2816 ret = filemap_write_and_wait_range(mapping, pos, end);
2828 end >> PAGE_SHIFT);
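These fragments are the direct-I/O write prologue: flush any overlapping cached data, then invalidate it so the DIO result is not shadowed by stale pagecache. A sketch of the same sequence built from the exported primitives; dio_write_prep() is hypothetical, and real callers go through the kiocb helpers shown above:

    #include <linux/pagemap.h>

    /* Hypothetical direct-write prologue over [pos, pos + count). */
    static int dio_write_prep(struct address_space *mapping, loff_t pos,
                              size_t count)
    {
            loff_t end = pos + count - 1;
            int err;

            err = filemap_write_and_wait_range(mapping, pos, end);
            if (err)
                    return err;
            return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                                 end >> PAGE_SHIFT);
    }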
2872 struct file *file = iocb->ki_filp;
2873 struct address_space *mapping = file->f_mapping;
2879 file_accessed(file);
2942 * filemap_splice_read - Splice data from a file's pagecache into a pipe
2943 * @in: The file to read from
2944 * @ppos: Pointer to the file position to read from
2949 * This function gets folios from a file's pagecache and splices them into the
2957 * if the pipe has insufficient space, we reach the end of the data or we hit a
2960 ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
3001 * another truncate extends the file - this is desired though).
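Filesystems that keep plain pagecache normally export this helper directly from their file_operations. A sketch of the wiring; sketch_splice_fops is hypothetical:

    #include <linux/fs.h>

    /* Hypothetical: splice support for a pagecache-backed file. */
    static const struct file_operations sketch_splice_fops = {
            .read_iter      = generic_file_read_iter,
            .splice_read    = filemap_splice_read,
    };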
3055 loff_t start, loff_t end, bool seek_data)
3061 return seek_data ? start : end;
3063 return seek_data ? end : start;
3097 * @end: Limit of search (exclusive).
3108 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3109 * and @end contain data.
3112 loff_t end, int whence)
3115 pgoff_t max = (end - 1) >> PAGE_SHIFT;
3119 if (end <= start)
3139 if (start >= end)
3152 if (start > end)
3153 return end;
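mapping_seek_hole_data() lets an in-core filesystem answer SEEK_DATA/SEEK_HOLE straight from the pagecache, roughly as shmem does. A sketch; sketch_llseek() is hypothetical and skips the offset validation a real llseek needs:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical llseek: pagecache-backed SEEK_DATA/SEEK_HOLE. */
    static loff_t sketch_llseek(struct file *file, loff_t offset, int whence)
    {
            struct inode *inode = file_inode(file);

            if (whence == SEEK_DATA || whence == SEEK_HOLE)
                    offset = mapping_seek_hole_data(file->f_mapping, offset,
                                                    i_size_read(inode), whence);
            if (offset < 0)
                    return offset;          /* e.g. -ENXIO past EOF */
            return vfs_setpos(file, offset, MAX_LFS_FILESIZE);
    }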
3163 * @fpin - the pointer to the file we may pin (or is already pinned).
3168 * to drop the mmap_lock then fpin will point to the pinned file and
3172 struct file **fpin)
3208 * to drop the mmap sem we return the file that was pinned in order for us to do
3209 * that. If we didn't pin a file then we return NULL. The file that is
3212 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3214 struct file *file = vmf->vma->vm_file;
3215 struct file_ra_state *ra = &file->f_ra;
3216 struct address_space *mapping = file->f_mapping;
3217 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3218 struct file *fpin = NULL;
3262 * Do we miss much more than hit in this file? If so,
3282 unsigned long end = start + vma_pages(vma);
3289 ra_end = min(ra_end, end);
3310 * so we want to possibly extend the readahead further. We return the file that
3313 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3316 struct file *file = vmf->vma->vm_file;
3317 struct file_ra_state *ra = &file->f_ra;
3318 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3319 struct file *fpin = NULL;
3381 * filemap_fault - read in file data for page fault handling
3385 * mapped memory region to read in file data during a page fault.
3406 struct file *file = vmf->vma->vm_file;
3407 struct file *fpin = NULL;
3408 struct address_space *mapping = file->f_mapping;
3537 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3653 * in a file, they will probably continue to be evicted.
3736 struct file *file = vma->vm_file;
3737 struct address_space *mapping = file->f_mapping;
3771 unsigned long end;
3776 end = folio_next_index(folio) - 1;
3777 nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
3796 mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
3798 WRITE_ONCE(file->f_ra.mmap_miss, 0);
3800 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
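filemap_fault() and filemap_map_pages() are designed to be plugged into a VMA's vm_ops; this is exactly how generic_file_vm_ops wires them up:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Default pagecache fault handling, as generic_file_vm_ops does it. */
    static const struct vm_operations_struct sketch_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };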
3838 /* This is used for a general mmap of a disk file */
3840 int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3842 struct address_space *mapping = file->f_mapping;
3846 file_accessed(file);
3853 struct file *file = desc->file;
3854 struct address_space *mapping = file->f_mapping;
3858 file_accessed(file);
3866 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3870 return generic_file_mmap(file, vma);
3884 int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3892 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
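Hooking these helpers up is a one-liner in file_operations; note that generic_file_mmap() refuses mappings whose aops lack ->read_folio. A sketch; sketch_mmap_fops is hypothetical:

    #include <linux/fs.h>

    /* Hypothetical: default mmap support for a pagecache-backed file. */
    static const struct file_operations sketch_mmap_fops = {
            .llseek         = generic_file_llseek,
            .read_iter      = generic_file_read_iter,
            .mmap           = generic_file_mmap,
    };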
3909 pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3957 err = filemap_read_folio(file, filler, folio);
3975 * @file: Passed to filler function, may be NULL if not required.
3987 filler_t filler, struct file *file)
3989 return do_read_cache_folio(mapping, index, filler, file,
4019 pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
4023 folio = do_read_cache_folio(mapping, index, filler, file, gfp);
4030 pgoff_t index, filler_t *filler, struct file *file)
4032 return do_read_cache_page(mapping, index, filler, file,
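read_cache_folio() is the caller-facing entry: a NULL filler falls back to the mapping's own ->read_folio. A sketch; get_uptodate_folio() is hypothetical:

    #include <linux/err.h>
    #include <linux/pagemap.h>

    /* Hypothetical: return a referenced, uptodate folio at @index,
     * reading it in via ->read_folio if needed. */
    static struct folio *get_uptodate_folio(struct address_space *mapping,
                                            pgoff_t index)
    {
            struct folio *folio = read_cache_folio(mapping, index, NULL, NULL);

            if (IS_ERR(folio))
                    return folio;   /* e.g. ERR_PTR(-EIO) from the read */
            /* caller must folio_put() when done */
            return folio;
    }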
4063 static void dio_warn_stale_pagecache(struct file *filp)
4114 * if the source of the write was an mmap'ed region of the file
4120 * the invalidation for us. However there are some file systems that
4121 * do not end up with dio_complete() being called, so let's not break
4149 struct file *file = iocb->ki_filp;
4151 struct address_space *mapping = file->f_mapping;
4242 * __generic_file_write_iter - write data to a file
4243 * @iocb: IO state structure (file, offset, etc.)
4247 * file. It does all basic checks, removes SUID from the file, updates
4264 struct file *file = iocb->ki_filp;
4265 struct address_space *mapping = file->f_mapping;
4269 ret = file_remove_privs(file);
4273 ret = file_update_time(file);
4297 * generic_file_write_iter - write data to a file
4302 * filesystems. It takes care of syncing the file in case of O_SYNC file
4311 struct file *file = iocb->ki_filp;
4312 struct inode *inode = file->f_mapping->host;
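A filesystem with its own locking rules can rebuild generic_file_write_iter() around the __generic variant; this sketch mirrors the generic body. sketch_write_iter() is hypothetical:

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Hypothetical ->write_iter: checks, buffered/direct write, then
     * O_SYNC handling, all under the inode lock. */
    static ssize_t sketch_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t ret;

            inode_lock(inode);
            ret = generic_write_checks(iocb, from);
            if (ret > 0)
                    ret = __generic_file_write_iter(iocb, from);
            inode_unlock(inode);

            if (ret > 0)
                    ret = generic_write_sync(iocb, ret);
            return ret;
    }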
4365 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
4374 loff_t start, loff_t end)
4378 pgoff_t last = end >> PAGE_SHIFT;
4379 pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;
4381 if (!mapping || !mapping->nrpages || end < start)
4398 .range_end = end,
4405 invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
4477 /* shmem file - in swap cache */
4528 static inline bool can_do_cachestat(struct file *f)
4540 * cachestat() returns the page cache statistics of a file in the
4553 * we will query in the range from `off` to the end of the file.
4568 * -EBADF - invalid file descriptor
4569 * -EOPNOTSUPP - file descriptor is of a hugetlbfs file
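From userspace this surfaces as the cachestat(2) syscall (Linux 6.5+). A hedged sketch: the struct layouts are declared locally in case installed UAPI headers predate them, and the fallback syscall number should be verified against your arch's unistd.h:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_cachestat
    #define __NR_cachestat 451      /* generic/x86-64 number; verify locally */
    #endif

    struct cachestat_range { uint64_t off, len; };
    struct cachestat {
            uint64_t nr_cache, nr_dirty, nr_writeback,
                     nr_evicted, nr_recently_evicted;
    };

    /* Query pagecache residency for the whole file; len == 0 means
     * "from off to the end of the file", per the comment at 4553. */
    static int print_cachestat(int fd)
    {
            struct cachestat_range range = { .off = 0, .len = 0 };
            struct cachestat cs;

            if (syscall(__NR_cachestat, fd, &range, &cs, 0) != 0)
                    return -1;
            printf("cached=%llu dirty=%llu writeback=%llu\n",
                   (unsigned long long)cs.nr_cache,
                   (unsigned long long)cs.nr_dirty,
                   (unsigned long long)cs.nr_writeback);
            return 0;
    }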