Lines Matching +full:compare +full:- +full:and +full:- +full:swap
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
17 #include <linux/swap.h>
20 #include <linux/tracepoint-defs.h>
29 * and destination VMAs already exist and are specified by the user.
31 * Partial moves are permitted, but the old and new ranges must both reside
34 * mmap lock must be held in write and VMA write locks must be held on any VMA
39 * The old_addr and new_addr fields are updated as the page table move is
43 * and old_addr may be updated for better page table alignment, so len_in
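The comment fragments above (source lines 29-43) describe the descriptor handed to the page-table move path during mremap(): both VMAs already exist, partial moves are allowed under the mmap write lock, and old_addr/new_addr advance as tables are moved while len_in keeps the caller's original length. A declaration-only sketch of what such a descriptor might carry; only old_addr, new_addr and len_in are named above, everything else here is an assumption:

/* Declaration-only sketch; field names other than old_addr, new_addr and
 * len_in are assumptions, not the kernel's actual definition. */
struct vm_area_struct;				/* both VMAs already exist */

struct pagetable_move_sketch {
	struct vm_area_struct *old_vma;		/* assumed: source VMA */
	struct vm_area_struct *new_vma;		/* assumed: destination VMA */
	unsigned long old_addr;			/* advances as tables are moved */
	unsigned long new_addr;			/* advances in step with old_addr */
	unsigned long len_in;			/* length the caller asked to move */
};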
69 * The set of flags that only affect watermark checking and reclaim
71 * about IO, FS and watermark checking while ignoring placement
82 /* Control allocation cpuset and node placement constraints */
112 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
115 * Flags passed to __show_mem() and show_free_areas() to suppress output in
129 return -1; in folio_nr_pages_mapped()
130 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
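The mask arithmetic at source line 112 splits one atomic counter into a per-page mapped count plus an "entirely mapped" flag bit, which is what folio_nr_pages_mapped() extracts at line 130. A minimal user-space sketch of that split; the value chosen for ENTIRELY_MAPPED is an assumption, only the FOLIO_PAGES_MAPPED = ENTIRELY_MAPPED - 1 relationship comes from the source:

#include <stdio.h>

/* Hypothetical stand-ins; only the mask relationship is taken from above. */
#define ENTIRELY_MAPPED		(1u << 23)
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

int main(void)
{
	unsigned int nr_pages_mapped = ENTIRELY_MAPPED | 7;	/* flag bit + 7 pages */

	/* Low bits: how many individual pages of the folio are mapped. */
	printf("pages mapped: %u\n", nr_pages_mapped & FOLIO_PAGES_MAPPED);
	/* High bit: the folio is mapped in its entirety somewhere. */
	printf("entirely mapped: %d\n", !!(nr_pages_mapped & ENTIRELY_MAPPED));
	return 0;
}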
135 * folio. We cannot rely on folio->swap as there is no guarantee that it has
141 swp_entry_t swap = { in folio_swap() local
145 return swap; in folio_swap()
150 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
156 * This is a file-backed mapping, and is about to be memory mapped - invoke its
157 * mmap hook and safely handle error conditions. On error, VMA hooks will be
174 * arose. The mapping is in an inconsistent state and we must not invoke in mmap_file()
177 vma->vm_ops = &vma_dummy_vm_ops; in mmap_file()
183 * If the VMA has a close hook then close it, and since closing it might leave
189 if (vma->vm_ops && vma->vm_ops->close) { in vma_close()
190 vma->vm_ops->close(vma); in vma_close()
193 * The mapping is in an inconsistent state, and no further hooks in vma_close()
196 vma->vm_ops = &vma_dummy_vm_ops; in vma_close()
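Both mmap_file() and vma_close() above end the same way: once the provider's hook has run (or failed), vma->vm_ops is pointed at a dummy table so no further hooks can reach an implementation that is already in an inconsistent state. A user-space sketch of that "swap in a no-op vtable" pattern; every name here (toy_vma, toy_vm_ops, toy_dummy_vm_ops) is invented for illustration:

#include <stdio.h>

struct toy_vma;				/* all names invented for illustration */

struct toy_vm_ops {
	void (*close)(struct toy_vma *vma);
};

/* Empty ops table: once installed, later hook calls become no-ops. */
static const struct toy_vm_ops toy_dummy_vm_ops = { 0 };

struct toy_vma {
	const struct toy_vm_ops *vm_ops;
};

static void toy_vma_close(struct toy_vma *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	/* The provider has cleaned up; make sure nothing reaches it again. */
	vma->vm_ops = &toy_dummy_vm_ops;
}

static void noisy_close(struct toy_vma *vma)
{
	(void)vma;
	printf("driver close hook ran\n");
}

int main(void)
{
	static const struct toy_vm_ops noisy_ops = { .close = noisy_close };
	struct toy_vma vma = { .vm_ops = &noisy_ops };

	toy_vma_close(&vma);	/* runs the provider hook once */
	toy_vma_close(&vma);	/* dummy ops installed: nothing happens */
	return 0;
}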
205 /* Compare PTEs respecting the dirty bit. */
208 /* Compare PTEs respecting the soft-dirty bit. */
211 /* Compare PTEs respecting the writable bit. */
221 * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
238 * folio_pte_batch_flags - detect a PTE batch for a large folio
248 * pages of the same large folio in a single VMA and a single page table.
252 * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
254 * @ptep must map any page of the folio. max_nr must be at least one and
255 * must be limited by the caller so scanning cannot exceed a single VMA and
287 folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte)); in folio_pte_batch_flags()
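The expression at source line 287 caps the PTE batch so it cannot run past the end of the folio: the pages left in the folio from the current PTE onward are the folio's first PFN plus its page count, minus the PFN this PTE maps. A quick arithmetic check with made-up numbers:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long folio_pfn = 0x1000;	/* first PFN of the folio */
	unsigned long folio_nr_pages = 16;	/* e.g. an order-4 folio */
	unsigned long pte_pfn = 0x100a;		/* PTE maps the 11th page */
	unsigned long max_nr = 32;		/* caller's VMA/page-table limit */

	/* Pages remaining in the folio from this PTE: 0x1010 - 0x100a = 6,
	 * so the batch is capped at 6 even though the caller allowed 32. */
	printf("batch limit: %lu\n",
	       min_ul(max_nr, folio_pfn + folio_nr_pages - pte_pfn));
	return 0;
}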
326 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
328 * @pte: The initial pte state; is_swap_pte(pte) must be true and
330 * @delta: The direction and the offset we are moving; forward if delta
333 * Moves the swap offset, while maintaining all other fields, including
334 * swap type, and any swp pte bits. The resulting pte is returned.
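The kernel-doc above (source lines 326-334) describes an operation purely on the software swap-entry encoding: add delta to the offset field, keep the swap type and any swp pte bits untouched. A user-space sketch with a deliberately made-up layout (5 type bits, the rest offset); the real encoding is arch-specific:

#include <stdio.h>

/* Toy swap-entry layout: low 5 bits = type, remaining bits = offset.
 * Purely illustrative; not how any architecture actually encodes it. */
#define TOY_TYPE_BITS	5
#define TOY_TYPE_MASK	((1UL << TOY_TYPE_BITS) - 1)

static unsigned long toy_move_swp_offset(unsigned long entry, long delta)
{
	unsigned long type = entry & TOY_TYPE_MASK;
	unsigned long offset = entry >> TOY_TYPE_BITS;

	return ((offset + delta) << TOY_TYPE_BITS) | type;	/* type preserved */
}

int main(void)
{
	unsigned long entry = (100UL << TOY_TYPE_BITS) | 2;	/* type 2, offset 100 */
	unsigned long next = toy_move_swp_offset(entry, 1);

	printf("type %lu, offset %lu\n",
	       next & TOY_TYPE_MASK, next >> TOY_TYPE_BITS);
	return 0;
}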
354 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
355 * @pte: The initial pte state; is_swap_pte(pte) must be true and
358 * Increments the swap offset, while maintaining all other fields, including
359 * swap type, and any swp pte bits. The resulting pte is returned.
367 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
372 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
373 * containing swap entries all with consecutive offsets and targeting the same
374 * swap type, all with matching swp pte bits.
376 * max_nr must be at least one and must be limited by the caller so scanning
405 return ptep - start_ptep; in swap_pte_batch()
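Source lines 367-405 describe the batching rule: count consecutive non-present PTEs whose swap entries share a type and whose offsets increase by one, with the result taken as the pointer difference ptep - start_ptep. A user-space sketch over an array of toy entries, reusing the made-up type/offset layout from the previous sketch:

#include <stdio.h>

#define TOY_TYPE_BITS	5
#define TOY_TYPE_MASK	((1UL << TOY_TYPE_BITS) - 1)
#define TOY_ENTRY(type, offset) \
	(((unsigned long)(offset) << TOY_TYPE_BITS) | (type))

/* Count consecutive entries with the same type and offsets stepping by 1,
 * mirroring the "ptep - start_ptep" result described above. */
static int toy_swap_batch(const unsigned long *start, int max_nr)
{
	const unsigned long *p = start;
	unsigned long expected = *p;

	while (p - start < max_nr && *p == expected) {
		p++;
		expected = TOY_ENTRY(expected & TOY_TYPE_MASK,
				     (expected >> TOY_TYPE_BITS) + 1);
	}
	return p - start;
}

int main(void)
{
	unsigned long ptes[] = {
		TOY_ENTRY(2, 100), TOY_ENTRY(2, 101), TOY_ENTRY(2, 102),
		TOY_ENTRY(3, 103),	/* different type: batch stops here */
	};

	printf("batch length: %d\n", toy_swap_batch(ptes, 4));	/* prints 3 */
	return 0;
}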
414 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
424 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
435 vma_end_read(vmf->vma); in vmf_anon_prepare()
466 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); in force_page_cache_readahead()
483 * folio_evictable - Test whether a folio is evictable.
486 * Test whether @folio is evictable -- i.e., should be placed on
497 /* Prevent address_space of inode and swap cache from being freed */ in folio_evictable()
506 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
517 * Return true if a folio needs ->release_folio() called on it.
560 #define K(x) ((x) << (PAGE_SHIFT-10))
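K() at source line 560 turns a page count into kibibytes: shifting left by PAGE_SHIFT would give bytes and shifting right by 10 divides by 1024, so the two collapse into a single shift. A quick check, assuming the common 4 KiB page size (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long pages = 256;

	/* 256 pages * 4 KiB/page = 1024 KiB */
	printf("%lu pages = %lu KiB\n", pages, K(pages));
	return 0;
}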
580 * nodemask, migratetype and highest_zoneidx are initialized only once in
581 * __alloc_pages() and then never change.
583 * zonelist, preferred_zone and highest_zoneidx are set first in
584 * __alloc_pages() for the fast path, and might be later changed
610 * general, page_zone(page)->lock must be held by the caller to prevent the
611 * page from being allocated in parallel and returning garbage as the order.
612 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
614 * handle invalid values gracefully, and use buddy_order_unsafe() below.
625 * and invalid values must be handled gracefully.
628 * variable and e.g. tests it for valid range before using, the compiler cannot
629 * decide to remove the variable and inline the page_private(page) multiple
630 * times, potentially observing different values in the tests and the actual
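The comment around source lines 610-630 is warning about a compiler hazard: if the racy read of the order were an ordinary load, the compiler could legally re-read it after a range check and observe a different value, so buddy_order_unsafe() snapshots it with READ_ONCE() into a local first. A minimal sketch of that pattern, assuming the usual volatile-cast definition of READ_ONCE (shared_order and checked_order are invented names):

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long shared_order;	/* may be updated concurrently */

static unsigned long checked_order(void)
{
	/* Read exactly once; every later use sees this one snapshot, so a
	 * concurrent writer cannot make the check and the use disagree. */
	unsigned long order = READ_ONCE(shared_order);

	if (order > 10)		/* handle garbage values gracefully */
		return 0;
	return order;
}

int main(void)
{
	shared_order = 3;
	printf("order: %lu\n", checked_order());
	return 0;
}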
637 * we can coalesce a page and its buddy if
640 * (c) a page and its buddy have the same order &&
641 * (d) a page and its buddy are in the same zone.
644 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
671 * pair (buddy1) and the combined O(n+1) page they form (page).
693 * Find the buddy of @page and validate it.
696 * function is used in the performance-critical __free_one_page().
712 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
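The pointer arithmetic at source line 712 leans on the buddy invariant: for an order-n block, the buddy's PFN differs only in bit n, so it is found by XOR-ing the PFN with 1 << order. A small standalone check (the validation the real find_buddy_page_pfn() performs is omitted):

#include <stdio.h>

/* Find the buddy PFN of a free block: flip bit 'order' of the PFN. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	/* An order-2 block at PFN 0x1040 pairs with the block at 0x1044,
	 * and together they form the order-3 block starting at 0x1040. */
	printf("buddy of 0x1040 (order 2): 0x%lx\n", find_buddy_pfn(0x1040, 2));
	printf("buddy of 0x1044 (order 2): 0x%lx\n", find_buddy_pfn(0x1044, 2));
	return 0;
}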
727 if (zone->contiguous) in pageblock_pfn_to_page()
739 zone->contiguous = false; in clear_zone_contiguous()
752 * caller passes in a non-large folio.
760 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
762 folio->_nr_pages = 1U << order; in folio_set_order()
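Source lines 760-762 pack the folio order into the low byte of _flags_1 and cache the page count as 1 << order. A user-space sketch of the same encode/decode; the 0xff low-byte mask is taken from the code above, everything else is illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long flags_1 = 0xabcdef00UL;	/* pretend upper bits hold flags */
	unsigned int order = 4;
	unsigned long nr_pages;

	/* Encode: clear the low byte, then store the order there. */
	flags_1 = (flags_1 & ~0xffUL) | order;
	nr_pages = 1UL << order;

	/* Decode: the low byte is the order again. */
	printf("order=%lu nr_pages=%lu\n", flags_1 & 0xff, nr_pages);
	return 0;
}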
777 if (data_race(list_empty(&folio->_deferred_list))) in folio_unqueue_deferred_split()
797 atomic_set(&folio->_large_mapcount, -1); in prep_compound_head()
799 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
801 folio->_mm_ids = 0; in prep_compound_head()
802 folio->_mm_id_mapcount[0] = -1; in prep_compound_head()
803 folio->_mm_id_mapcount[1] = -1; in prep_compound_head()
806 atomic_set(&folio->_pincount, 0); in prep_compound_head()
807 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
810 INIT_LIST_HEAD(&folio->_deferred_list); in prep_compound_head()
817 p->mapping = TAIL_MAPPING; in prep_compound_tail()
869 * compact_control is used to track pages being migrated and the free pages
871 * at the end of a zone and migrate_pfn begins at the start. Movable pages
872 * are moved to the end of a zone during a compaction run and the run
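Source lines 869-872 describe compaction's two scanners: the migrate scanner walks up from the start of the zone while the free scanner walks down from the end, and the run is over when they meet. A toy simulation of just that convergence (real compaction works per pageblock and isolates pages; all values here are invented):

#include <stdio.h>

int main(void)
{
	unsigned long zone_start_pfn = 0;
	unsigned long zone_end_pfn = 1024;
	unsigned long migrate_pfn = zone_start_pfn;	/* walks upward */
	unsigned long free_pfn = zone_end_pfn;		/* walks downward */
	unsigned long stride = 32;			/* illustrative pageblock */

	while (migrate_pfn < free_pfn) {
		migrate_pfn += stride;	/* "isolate" movable pages */
		free_pfn -= stride;	/* "isolate" free target pages */
	}
	printf("scanners met around pfn %lu\n", free_pfn);
	return 0;
}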
931 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
957 return list_empty(&area->free_list[migratetype]); in free_area_empty()
982 * and the range [start, end) intersects with the VMA range. Caller wants
998 if (start < vma->vm_start) in folio_within_range()
999 start = vma->vm_start; in folio_within_range()
1001 if (end > vma->vm_end) in folio_within_range()
1002 end = vma->vm_end; in folio_within_range()
1007 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) in folio_within_range()
1010 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in folio_within_range()
1012 return !(addr < start || end - addr < folio_size(folio)); in folio_within_range()
1018 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); in folio_within_vma()
1022 * mlock_vma_folio() and munlock_vma_folio():
1036 * 1) VM_IO check prevents migration from double-counting during mlock. in mlock_vma_folio()
1037 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED in mlock_vma_folio()
1039 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may in mlock_vma_folio()
1042 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) in mlock_vma_folio()
1052 * do munlock if any page of folio is unmapped from VMA and in munlock_vma_folio()
1056 * always munlock the folio and page reclaim will correct it in munlock_vma_folio()
1059 if (unlikely(vma->vm_flags & VM_LOCKED)) in munlock_vma_folio()
1071 * vma_address - Find the virtual address a page range is mapped at
1077 * where any of these pages appear. Otherwise, return -EFAULT.
1084 if (pgoff >= vma->vm_pgoff) { in vma_address()
1085 address = vma->vm_start + in vma_address()
1086 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address()
1088 if (address < vma->vm_start || address >= vma->vm_end) in vma_address()
1089 address = -EFAULT; in vma_address()
1090 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { in vma_address()
1091 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_address()
1092 address = vma->vm_start; in vma_address()
1094 address = -EFAULT; in vma_address()
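The computation at source lines 1084-1092 is linear-mapping arithmetic: take the page's file offset, subtract the VMA's starting file offset, scale to bytes, and add vm_start; anything outside [vm_start, vm_end) becomes -EFAULT. A user-space sketch with a toy VMA and 4 KiB pages (the multi-page and 32-bit wrap handling of the real helper is dropped):

#include <stdio.h>

#define PAGE_SHIFT 12
#define EFAULT 14

struct toy_vma {		/* illustrative stand-in for vm_area_struct */
	unsigned long vm_start, vm_end;
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

static unsigned long toy_vma_address(struct toy_vma *vma, unsigned long pgoff)
{
	unsigned long address;

	if (pgoff < vma->vm_pgoff)
		return -EFAULT;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address < vma->vm_start || address >= vma->vm_end)
		return -EFAULT;
	return address;
}

int main(void)
{
	struct toy_vma vma = { 0x700000000000UL, 0x700000010000UL, 16 };

	/* Page 20 of the file sits 4 pages past vm_start. */
	printf("0x%lx\n", toy_vma_address(&vma, 20));
	return 0;
}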
1105 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
1109 /* Common case, plus ->pgoff is invalid for KSM */ in vma_address_end()
1110 if (pvmw->nr_pages == 1) in vma_address_end()
1111 return pvmw->address + PAGE_SIZE; in vma_address_end()
1113 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
1114 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
1116 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
1117 address = vma->vm_end; in vma_address_end()
1124 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
1131 * anything, so we only pin the file and drop the mmap_lock if only in maybe_unlock_mmap_for_io()
1136 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
1152 /* Memory initialisation debug and verification */
1199 #define NODE_RECLAIM_NOSCAN -2
1200 #define NODE_RECLAIM_FULL -1
1230 * mm/memory-failure.c
1254 return -EBUSY; in unmap_poisoned_folio()
1266 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1273 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
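Source lines 1266-1273 encode a small trick: the low alloc_flags bits double as an index into the zone's watermark array, and ALLOC_WMARK_MASK is derived as ALLOC_NO_WATERMARKS - 1 so it masks off exactly everything above those index bits. A sketch with illustrative values (the real flag values live in mm/internal.h):

#include <stdio.h>

/* Illustrative values: min/low/high indices, then a flag bit above them. */
#define ALLOC_WMARK_MIN		0x00
#define ALLOC_WMARK_LOW		0x01
#define ALLOC_WMARK_HIGH	0x02
#define ALLOC_NO_WATERMARKS	0x04
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS - 1)

int main(void)
{
	unsigned long watermark[3] = { 100, 200, 300 };	/* min, low, high */
	unsigned int alloc_flags = ALLOC_WMARK_LOW | 0x40;	/* other bits set */

	/* Masking strips the unrelated flags and leaves the array index. */
	printf("watermark = %lu\n", watermark[alloc_flags & ALLOC_WMARK_MASK]);
	return 0;
}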
1276 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
1369 return -EINVAL; in vmap_pages_range_noflush()
1422 return -EINVAL; in get_order_from_str()
1425 return -EINVAL; in get_order_from_str()
1435 /* we are working on non-current tsk/mm */
1439 /* gup_fast: prevent fall-back to slow gup */
1452 * Indicates, for pages that are write-protected in the page table,
1460 * * GUP-fast and fork(): mm->write_protect_seq
1461 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1466 * PTE-mapped THP.
1468 * If the vma is NULL, we're coming from the GUP-fast path and might have
1476 * has to be writable -- and if it references (part of) an anonymous in gup_must_unshare()
1487 * We only care about R/O long-term pinning: R/O short-term in gup_must_unshare()
1502 return is_cow_mapping(vma->vm_flags); in gup_must_unshare()
1510 * Note that KSM pages cannot be exclusive, and consequently, in gup_must_unshare()
1524 vma->vm_start = start; in vma_set_range()
1525 vma->vm_end = end; in vma_set_range()
1526 vma->vm_pgoff = pgoff; in vma_set_range()
1532 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty in vma_soft_dirty_enabled()
1533 * enablements, because when without soft-dirty being compiled in, in vma_soft_dirty_enabled()
1541 * Soft-dirty is kind of special: its tracking is enabled when the in vma_soft_dirty_enabled()
1544 return !(vma->vm_flags & VM_SOFTDIRTY); in vma_soft_dirty_enabled()
1569 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in shrinker_debugfs_name_alloc()
1571 return shrinker->name ? 0 : -ENOMEM; in shrinker_debugfs_name_alloc()
1576 kfree_const(shrinker->name); in shrinker_debugfs_name_free()
1577 shrinker->name = NULL; in shrinker_debugfs_name_free()
1601 *debugfs_id = -1; in shrinker_debugfs_detach()