Lines matching "non-inclusive" in mm/truncate.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/truncate.c - code for taking down pages from address_spaces
12 #include <linux/backing-dev.h>
29 XA_STATE(xas, &mapping->i_pages, start);
38 spin_lock(&mapping->host->i_lock);
49 inode_add_lru(mapping->host);
50 spin_unlock(&mapping->host->i_lock);
63 XA_STATE(xas, &mapping->i_pages, indices[0]);
73 if (xa_is_value(fbatch->folios[j]))
81 if (xa_is_value(fbatch->folios[i])) {
104 spin_lock(&mapping->host->i_lock);
107 xas_for_each(&xas, folio, indices[nr-1]) {
114 inode_add_lru(mapping->host);
115 spin_unlock(&mapping->host->i_lock);
121 * folio_invalidate - Invalidate part or all of a folio.
133 * blocks on-disk.
137 const struct address_space_operations *aops = folio->mapping->a_ops;
139 if (aops->invalidate_folio)
140 aops->invalidate_folio(folio, offset, length);
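folio_invalidate() above is only a dispatcher: if the owning address_space's operations table provides an invalidate_folio hook, it is called with the sub-folio range to drop. Below is a minimal userspace sketch of that function-pointer dispatch; the struct layout and the my_* names are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Toy stand-ins for the kernel types; names are illustrative only. */
struct folio;

struct address_space_operations {
	/* Optional hook: invalidate 'length' bytes starting at 'offset'. */
	void (*invalidate_folio)(struct folio *folio, size_t offset,
				 size_t length);
};

static void my_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	(void)folio;
	printf("invalidate %zu bytes at offset %zu\n", length, offset);
}

static const struct address_space_operations my_aops = {
	.invalidate_folio = my_invalidate_folio,
};

/* Mirrors the dispatch in folio_invalidate(): call the hook if it is set. */
static void folio_invalidate_sketch(const struct address_space_operations *aops,
				    struct folio *folio, size_t offset,
				    size_t length)
{
	if (aops->invalidate_folio)
		aops->invalidate_folio(folio, offset, length);
}

int main(void)
{
	folio_invalidate_sketch(&my_aops, NULL, 512, 3584);
	return 0;
}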
145 * If truncate cannot remove the fs-private metadata from the page, the page
149 * We need to bail out if page->mapping is no longer equal to the original
163 * Some filesystems seem to re-dirty the page even after
172 if (folio->mapping != mapping)
173 return -EIO;
199 offset = start - pos;
203 length = size - offset;
205 length = end + 1 - pos - offset;
209 truncate_inode_folio(folio->mapping, folio);
218 if (!mapping_inaccessible(folio->mapping))
256 folio2->mapping == folio->mapping)
267 truncate_inode_folio(folio->mapping, folio);
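The fragments from source lines 199-267 belong to truncate_inode_partial_folio(), which works out how many bytes of a folio fall inside the truncated byte range [start, end] (end inclusive) before zeroing and invalidating them. A minimal userspace sketch of that arithmetic follows; the branch conditions are reconstructed from context rather than copied from the matched lines.

#include <stdio.h>

/*
 * Given a folio of 'size' bytes starting at byte 'pos', and an inclusive
 * byte range [start, end] being truncated, compute which bytes of the
 * folio fall inside the range.
 */
static void partial_range(unsigned long long pos, unsigned long long size,
			  unsigned long long start, unsigned long long end)
{
	unsigned int offset, length;

	/* The range may begin before or inside the folio. */
	offset = (pos < start) ? start - pos : 0;

	if (pos + size - 1 <= end)	/* folio ends inside the range */
		length = size - offset;
	else				/* range ends inside the folio */
		length = end + 1 - pos - offset;

	printf("folio @%llu (+%llu): invalidate %u bytes at offset %u\n",
	       pos, size, length, offset);
}

int main(void)
{
	/* 16 KiB folio at byte 16384, truncating from byte 20000 to EOF. */
	partial_range(16384, 16384, 20000, ~0ULL);
	/* Same folio, punching a hole covering bytes 20000..24575. */
	partial_range(16384, 16384, 20000, 24575);
	return 0;
}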
278 return -EINVAL;
283 if (!S_ISREG(mapping->host->i_mode))
284 return -EIO;
290 * mapping_evict_folio() - Remove an unused folio from the page-cache.
318 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
321 * @lend: offset to which to truncate (inclusive)
327 * Truncate takes two passes - the first pass is nonblocking. It will not
333 * We pass down the cache-hot hint to the page freeing code. Even if the
337 * Note that since ->invalidate_folio() accepts range to invalidate
344 pgoff_t start; /* inclusive */
360 * Note that 'end' is exclusive while 'lend' is inclusive.
362 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
363 if (lend == -1)
365 * lend == -1 indicates end-of-file so we have to set 'end'
367 * unsigned we're using -1.
369 end = -1;
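Source lines 360-369 above convert the byte range [lstart, lend] into the page-index range [start, end) used by the lookup loops that follow. Here is a minimal userspace sketch of the same conversion, assuming 4 KiB pages; the non-EOF else branch is reconstructed from context and is not among the matched lines.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Convert a byte range [lstart, lend] (lend inclusive, -1 meaning
 * end-of-file) into a half-open page-index range [start, end).
 */
static void to_page_range(long long lstart, long long lend)
{
	unsigned long start, end;

	/* First page that lies wholly inside the truncated range. */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		end = -1;	/* wraps to the largest possible index */
	else
		end = (lend + 1) >> PAGE_SHIFT;

	printf("bytes [%lld, %lld] -> pages [%lu, %lu)\n",
	       lstart, lend, start, end);
}

int main(void)
{
	to_page_range(8192, 20479);	/* page-aligned start, mid-range end */
	to_page_range(5000, -1);	/* unaligned start, to end-of-file   */
	return 0;
}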
375 while (index < end && find_lock_entries(mapping, &index, end - 1,
394 end = folio->index;
406 end = folio->index;
415 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
428 /* We rely upon deletion not changing folio->index */
446 * truncate_inode_pages - truncate *all* the pages from an offset
450 * Called under (and serialised by) inode->i_rwsem and
451 * mapping->invalidate_lock.
455 * mapping->nrpages can be non-zero when this function returns even after
460 truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
465 * truncate_inode_pages_final - truncate *all* pages before inode dies
468 * Called under (and serialized by) inode->i_rwsem.
491 xa_lock_irq(&mapping->i_pages);
492 xa_unlock_irq(&mapping->i_pages);
500 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
503 * @end: the offset 'to' which to invalidate (inclusive)
527 /* We rely upon deletion not changing folio->index */
551 clear_shadow_entries(mapping, indices[0], indices[nr-1]);
561 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
564 * @end: the offset 'to' which to invalidate (inclusive)
585 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
587 return mapping->a_ops->launder_folio(folio);
611 if (folio->mapping != mapping)
612 return -EBUSY;
614 return -EBUSY;
616 spin_lock(&mapping->host->i_lock);
617 xa_lock_irq(&mapping->i_pages);
623 xa_unlock_irq(&mapping->i_pages);
625 inode_add_lru(mapping->host);
626 spin_unlock(&mapping->host->i_lock);
631 xa_unlock_irq(&mapping->i_pages);
632 spin_unlock(&mapping->host->i_lock);
633 return -EBUSY;
637 * invalidate_inode_pages2_range - remove range of pages from an address_space
640 * @end: the page offset 'to' which to invalidate (inclusive)
645 * Return: -EBUSY if any pages could not be invalidated.
670 /* We rely upon deletion not changing folio->index */
676 ret = -EBUSY;
686 (1 + end - indices[i]), false);
691 if (unlikely(folio->mapping != mapping)) {
704 clear_shadow_entries(mapping, indices[0], indices[nr-1]);
718 unmap_mapping_pages(mapping, start, end - start + 1, false);
725 * invalidate_inode_pages2 - remove all pages from an address_space
731 * Return: -EBUSY if any pages could not be invalidated.
735 return invalidate_inode_pages2_range(mapping, 0, -1);
740 * truncate_pagecache - unmap and remove pagecache that has been truncated
750 * with on-disk format, and the filesystem would not have to deal with
756 struct address_space *mapping = inode->i_mapping;
762 * single-page unmaps. However after this first call, and
775 * truncate_setsize - update inode and pagecache for a new file size
789 loff_t oldsize = inode->i_size;
799 * pagecache_isize_extended - update pagecache after extension of i_size
807 * write access to the page. The filesystem will update its per-block
813 * The function must be called while we still hold i_rwsem - this not only
823 WARN_ON(to > inode->i_size);
829 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
832 folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
844 * The post-eof range of the folio must be zeroed before it is exposed
851 offset = from - folio_pos(folio);
852 end = min_t(unsigned int, to - folio_pos(folio),
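Source lines 799-852 are pagecache_isize_extended(): when i_size grows, the post-EOF part of the folio straddling the old end-of-file must be zeroed before it is exposed to the file; writeback would normally handle that, but i_size has already been increased. A minimal userspace sketch of the range computation, assuming 4 KiB folios; folio_pos here is just a stand-in returning the folio's starting byte offset.

#include <stdio.h>

#define FOLIO_SIZE	4096ULL

/* Stand-in for folio_pos(): byte offset of the folio in the file. */
static unsigned long long folio_pos(unsigned long long index)
{
	return index * FOLIO_SIZE;
}

/*
 * Given the old size 'from' and the new size 'to', compute which bytes of
 * the folio containing 'from' must be zeroed: everything from the old EOF
 * up to the new EOF, capped at the end of the folio.
 */
static void isize_extended_range(unsigned long long from, unsigned long long to)
{
	unsigned long long index = from / FOLIO_SIZE;
	unsigned long long pos = folio_pos(index);
	unsigned int offset, end;

	offset = from - pos;
	/* min_t(unsigned int, to - folio_pos(folio), folio_size(folio)) */
	end = (to - pos < FOLIO_SIZE) ? to - pos : FOLIO_SIZE;

	printf("folio %llu: zero bytes [%u, %u)\n", index, offset, end);
}

int main(void)
{
	/* File grown from 5,000 to 6,000 bytes: zero folio bytes 904..1903. */
	isize_extended_range(5000, 6000);
	/* File grown past the end of the folio: zero from 904 to folio end. */
	isize_extended_range(5000, 20000);
	return 0;
}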
863 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
871 * with on-disk format, and the filesystem would not have to deal with
877 struct address_space *mapping = inode->i_mapping;
879 loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
885 * allows holelen 0 for all, and we allow lend -1 for end of file.
891 * hole-punching should not remove private COWed pages from the hole.
895 1 + unmap_end - unmap_start, 0);
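truncate_pagecache_range() unmaps only the whole pages that lie entirely inside the punched hole: unmap_mapping_range() expands a partial hole outwards, whereas hole punching must contract it inwards, so lstart is rounded up and lend + 1 rounded down before the call. A minimal userspace sketch of that rounding, assuming 4 KiB pages; the round_up/round_down helpers below stand in for the kernel macros.

#include <stdio.h>

#define PAGE_SIZE	4096ULL

/* Simple rounding helpers standing in for the kernel's round_up()/round_down(). */
#define round_up(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y)	(((x) / (y)) * (y))

/*
 * For a hole covering bytes [lstart, lend] (both inclusive), compute the
 * page-aligned sub-range that is safe to unmap in one call.
 */
static void unmap_range(unsigned long long lstart, unsigned long long lend)
{
	unsigned long long unmap_start = round_up(lstart, PAGE_SIZE);
	unsigned long long unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;

	if (unmap_start < unmap_end)
		printf("unmap [%llu, %llu] (%llu bytes)\n",
		       unmap_start, unmap_end, 1 + unmap_end - unmap_start);
	else
		printf("no whole page inside [%llu, %llu]\n", lstart, lend);
}

int main(void)
{
	unmap_range(5000, 20479);	/* partial first page, aligned end */
	unmap_range(4096, 8191);	/* exactly one page                */
	unmap_range(5000, 6000);	/* hole inside a single page       */
	return 0;
}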