/linux/include/linux/
  leafops.h
    178  VM_WARN_ON_ONCE(1);  in softleaf_type()
    319  VM_WARN_ON_ONCE(!softleaf_is_marker(entry));  in softleaf_to_marker()
    360  VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));  in softleaf_to_pfn()
    376  VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));  in softleaf_to_page()
    381  VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));  in softleaf_to_page()
    396  VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));  in softleaf_to_folio()
    401  VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&  in softleaf_to_folio()
    469  VM_WARN_ON_ONCE(!softleaf_is_migration(entry));  in softleaf_is_migration_young()
    488  VM_WARN_ON_ONCE(!softleaf_is_migration(entry));  in softleaf_is_migration_dirty()
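All of the leafops.h hits above share one shape: an accessor warns once (and only on CONFIG_DEBUG_VM builds, which is what VM_WARN_ON_ONCE from include/linux/mmdebug.h provides) when it is called on the wrong kind of entry, then carries on rather than failing hard. Below is a minimal userspace sketch of that "warn once, keep going" pattern; WARN_ONCE_DBG and this struct softleaf are made-up stand-ins, not the kernel definitions.

/*
 * Hypothetical stand-in for a once-firing, debug-only warning used as a
 * precondition check in an accessor. Not the kernel macro.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef DEBUG_VM
#define WARN_ONCE_DBG(cond, msg) do {                           \
        static bool warned_once;                                \
        if ((cond) && !warned_once) {                           \
                warned_once = true;                             \
                fprintf(stderr, "warning: %s\n", msg);          \
        }                                                       \
} while (0)
#else
/* Debug checks disabled: condition still evaluated, warning suppressed. */
#define WARN_ONCE_DBG(cond, msg) do { (void)(cond); } while (0)
#endif

struct softleaf { bool is_marker; unsigned long marker; };

/* Warns (once) on misuse, but still returns a best-effort value. */
static unsigned long softleaf_to_marker(const struct softleaf *entry)
{
        WARN_ONCE_DBG(!entry->is_marker, "not a marker entry");
        return entry->marker;
}

int main(void)
{
        struct softleaf leaf = { .is_marker = false, .marker = 0 };

        softleaf_to_marker(&leaf);      /* fires the warning once */
        softleaf_to_marker(&leaf);      /* silent on repeat */
        return 0;
}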
|
  rmap.h
    190  VM_WARN_ON_ONCE(idx != 0 && idx != 1);  in folio_mm_id()
    196  VM_WARN_ON_ONCE(idx != 0 && idx != 1);  in folio_set_mm_id()
    204  VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio));  in __folio_large_mapcount_sanity_checks()
    205  VM_WARN_ON_ONCE(diff <= 0);  in __folio_large_mapcount_sanity_checks()
    206  VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX);  in __folio_large_mapcount_sanity_checks()
    214  VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio));  in __folio_large_mapcount_sanity_checks()
    215  VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX);  in __folio_large_mapcount_sanity_checks()
    217  VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY &&  in __folio_large_mapcount_sanity_checks()
    219  VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY &&  in __folio_large_mapcount_sanity_checks()
    221  VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY &&  in __folio_large_mapcount_sanity_checks()
    [all …]
|
  mm_inline.h
    138  VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));  in lru_tier_from_refs()
    168  VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);  in lru_gen_is_active()
    183  VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);  in lru_gen_update_size()
    184  VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);  in lru_gen_update_size()
    185  VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);  in lru_gen_update_size()
    217  VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));  in lru_gen_update_size()
|
  vmstat.h
    199  VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));  in global_node_page_state()
    328  VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));  in __mod_node_page_state()
|
/linux/arch/powerpc/include/asm/book3s/64/
  tlbflush.h
    177  VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);  in __pte_flags_need_flush()
    178  VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);  in __pte_flags_need_flush()
    179  VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));  in __pte_flags_need_flush()
    180  VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));  in __pte_flags_need_flush()
    181  VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));  in __pte_flags_need_flush()
    182  VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));  in __pte_flags_need_flush()
|
/linux/mm/
  swap_table.h
    44   VM_WARN_ON_ONCE(shadow && !xa_is_value(shadow));  in shadow_swp_to_tb()
    91   VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);  in __swap_table_set()
    101  VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);  in __swap_table_xchg()
    111  VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);  in __swap_table_get()
|
  userfaultfd.c
    568  VM_WARN_ON_ONCE(dst_addr >= dst_start + len);  in mfill_atomic_hugetlb()
    612  VM_WARN_ON_ONCE(!folio);  in mfill_atomic_hugetlb()
    624  VM_WARN_ON_ONCE(folio);  in mfill_atomic_hugetlb()
    645  VM_WARN_ON_ONCE(copied < 0);  in mfill_atomic_hugetlb()
    646  VM_WARN_ON_ONCE(err > 0);  in mfill_atomic_hugetlb()
    647  VM_WARN_ON_ONCE(!copied && !err);  in mfill_atomic_hugetlb()
    721  VM_WARN_ON_ONCE(dst_start & ~PAGE_MASK);  in mfill_atomic()
    722  VM_WARN_ON_ONCE(len & ~PAGE_MASK);  in mfill_atomic()
    725  VM_WARN_ON_ONCE(src_start + len <= src_start);  in mfill_atomic()
    726  VM_WARN_ON_ONCE(dst_start + len <= dst_start);  in mfill_atomic()
    [all …]
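The mfill_atomic() hits above are pure argument validation: the destination start and the length must be page aligned, and neither the source nor the destination range may wrap around the address space. A small userspace sketch of those checks in isolation follows, assuming a made-up helper name (check_mfill_args) and a fixed 4 KiB page size; only the check expressions mirror the listed assertions.

#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirror of the alignment and overflow checks at mfill_atomic() entry. */
static bool check_mfill_args(unsigned long dst_start, unsigned long src_start,
                             unsigned long len)
{
        if (dst_start & ~PAGE_MASK)             /* destination not page aligned */
                return false;
        if (len & ~PAGE_MASK)                   /* length not a whole number of pages */
                return false;
        if (src_start + len <= src_start)       /* source range wraps (or len == 0) */
                return false;
        if (dst_start + len <= dst_start)       /* destination range wraps (or len == 0) */
                return false;
        return true;
}

int main(void)
{
        assert(check_mfill_args(0x10000, 0x20000, 2 * PAGE_SIZE));
        assert(!check_mfill_args(0x10001, 0x20000, PAGE_SIZE));  /* misaligned dst */
        assert(!check_mfill_args(0x10000, 0x20000, 0));          /* zero length */
        return 0;
}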
|
  swap.h
    74   VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users)); /* race with swapoff */  in __swap_type_to_info()
    86   VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users)); /* race with swapoff */  in __swap_offset_to_cluster()
    87   VM_WARN_ON_ONCE(offset >= si->max);  in __swap_offset_to_cluster()
    111  VM_WARN_ON_ONCE(!in_task());  in __swap_cluster_lock()
    112  VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users)); /* race with swapoff */  in __swap_cluster_lock()
|
  pgtable-generic.c
    202  VM_WARN_ON_ONCE(!pmd_present(*pmdp));  in pmdp_invalidate()
    213  VM_WARN_ON_ONCE(!pmd_present(*pmdp));  in pmdp_invalidate_ad()
    327  VM_WARN_ON_ONCE(!pmdvalp);  in pte_offset_map_rw_nolock()
|
  mmu_gather.c
    176  VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);  in __tlb_remove_folio_pages_size()
    177  VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));  in __tlb_remove_folio_pages_size()
    502  VM_WARN_ON_ONCE(tlb->fully_unshared_tables);  in tlb_finish_mmu()
|
  vmscan.c
    2719  VM_WARN_ON_ONCE(!mem_cgroup_disabled());  in get_lruvec()
    2869  VM_WARN_ON_ONCE(!mem_cgroup_disabled());  in get_mm_list()
    2903  VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));  in lru_gen_add_mm()
    2905  VM_WARN_ON_ONCE(mm->lru_gen.memcg);  in lru_gen_add_mm()
    2969  VM_WARN_ON_ONCE(task->mm != mm);  in lru_gen_migrate_mm()
    2986  VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));  in lru_gen_migrate_mm()
    3059  VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);  in iterate_mm_list()
    3110  VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);  in iterate_mm_list_nowalk()
    3228  VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);  in folio_update_gen()
    3285  VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);  in update_batch_size()
    [all …]
|
  gup.c
    361   VM_WARN_ON_ONCE(!page_range_contiguous(page, npages));  in unpin_user_page_range_dirty_lock()
    523   VM_WARN_ON_ONCE(!irqs_disabled());  in try_grab_folio_fast()
    1123  VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_WRITE);  in faultin_page()
    1368  VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));  in __get_user_pages()
    1371  VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==  in __get_user_pages()
    1701  VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages));  in __get_user_pages_locked()
    1756  VM_WARN_ON_ONCE(ret != 0);  in __get_user_pages_locked()
    1760  VM_WARN_ON_ONCE(ret > 1);  in __get_user_pages_locked()
    1822  VM_WARN_ON_ONCE(!PAGE_ALIGNED(start));  in populate_vma_page_range()
    1823  VM_WARN_ON_ONCE(!PAGE_ALIGNED(end));  in populate_vma_page_range()
    [all …]
|
  swap_state.c
    192  VM_WARN_ON_ONCE(__swap_entry_to_cluster(entry) != ci);  in __swap_cache_del_folio()
    260  VM_WARN_ON_ONCE(!folio_test_swapcache(old) || !folio_test_swapcache(new));  in __swap_cache_replace_folio()
    261  VM_WARN_ON_ONCE(!folio_test_locked(old) || !folio_test_locked(new));  in __swap_cache_replace_folio()
    262  VM_WARN_ON_ONCE(!entry.val);  in __swap_cache_replace_folio()
|
  mempool.c
    500  VM_WARN_ON_ONCE(count > pool->min_nr);  in mempool_alloc_bulk_noprof()
    558  VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);  in mempool_alloc_noprof()
|
  mremap.c
    267   VM_WARN_ON_ONCE(!pte_none(*new_ptep));  in move_ptes()
    1895  VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);  in remap_move()
    1900  VM_WARN_ON_ONCE(!vrm->mmap_locked);  in remap_move()
    1902  VM_WARN_ON_ONCE(vrm->populate_expand);  in remap_move()
|
  vmstat.c
    391   VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));  in __mod_node_page_state()
    462   VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));  in __inc_node_state()
    518   VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));  in __dec_node_state()
    633   VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));  in mod_node_state()
    1038  VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));  in node_page_state()
|
  swapfile.c
    475   VM_WARN_ON_ONCE(!cluster_is_empty(ci));  in swap_cluster_free_table()
    477   VM_WARN_ON_ONCE(!swp_tb_is_null(__swap_table_get(ci, ci_off)));  in swap_cluster_free_table()
    502   VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));  in swap_cluster_alloc_table()
    619   VM_WARN_ON_ONCE(list != &si->free_clusters);  in isolate_lock_cluster()
    620   VM_WARN_ON_ONCE(!cluster_is_empty(found));  in isolate_lock_cluster()
    861   VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));  in swap_cluster_assert_table_empty()
    1443  VM_WARN_ON_ONCE(1);  in folio_alloc_swap()
    2221  VM_WARN_ON_ONCE(folio_test_large(folio));  in unuse_pte()
|
  zswap.c
    1504  VM_WARN_ON_ONCE(!folio_test_locked(folio));  in zswap_store()
    1505  VM_WARN_ON_ONCE(!folio_test_swapcache(folio));  in zswap_store()
    1608  VM_WARN_ON_ONCE(!folio_test_locked(folio));  in zswap_load()
|
/linux/fs/
  userfaultfd.c
    168  VM_WARN_ON_ONCE(spin_is_locked(&ctx->fault_pending_wqh.lock));  in userfaultfd_ctx_put()
    169  VM_WARN_ON_ONCE(waitqueue_active(&ctx->fault_pending_wqh));  in userfaultfd_ctx_put()
    170  VM_WARN_ON_ONCE(spin_is_locked(&ctx->fault_wqh.lock));  in userfaultfd_ctx_put()
    171  VM_WARN_ON_ONCE(waitqueue_active(&ctx->fault_wqh));  in userfaultfd_ctx_put()
    172  VM_WARN_ON_ONCE(spin_is_locked(&ctx->event_wqh.lock));  in userfaultfd_ctx_put()
    173  VM_WARN_ON_ONCE(waitqueue_active(&ctx->event_wqh));  in userfaultfd_ctx_put()
    174  VM_WARN_ON_ONCE(spin_is_locked(&ctx->fd_wqh.lock));  in userfaultfd_ctx_put()
    175  VM_WARN_ON_ONCE(waitqueue_active(&ctx->fd_wqh));  in userfaultfd_ctx_put()
    271  VM_WARN_ON_ONCE(1);  in userfaultfd_huge_must_wait()
    404  VM_WARN_ON_ONCE(ctx->mm != mm);  in handle_userfault()
    [all …]
|
/linux/arch/powerpc/mm/
  mmu_context.c
    51   VM_WARN_ON_ONCE(next == &init_mm);  in switch_mm_irqs_off()
    106  VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));  in switch_mm_irqs_off()
|
/linux/arch/x86/mm/
  tlb.c
    118   VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);  in kern_pcid()
    131   VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));  in kern_pcid()
    168   VM_WARN_ON_ONCE(asid != 0);  in build_cr3()
    182   VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));  in build_cr3_noflush()
    948   VM_WARN_ON_ONCE(is_global_asid(ns.asid));  in switch_mm_irqs_off()
    1716  VM_WARN_ON_ONCE(preemptible());  in __flush_tlb_all()
    1772  VM_WARN_ON_ONCE(!loaded_mm);  in nmi_uaccess_okay()
    1787  VM_WARN_ON_ONCE(__pa(current_mm->pgd) != read_cr3_pa());  in nmi_uaccess_okay()
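The kern_pcid() hits above bound the software ASID before it is folded into a hardware PCID: it must fit within the available PCID space, and it must not already use the bit that PTI reserves for the userspace copy of the PCID. A hedged userspace sketch of that encoding with the same two checks follows; the constants and encode_kern_pcid() are illustrative stand-ins, not the x86 definitions.

#include <assert.h>

/* Illustrative values only; the real limits live in arch/x86 headers. */
#define CR3_PTI_PCID_USER_BIT   11
#define MAX_ASID_AVAILABLE      ((1u << CR3_PTI_PCID_USER_BIT) - 1)

static unsigned long encode_kern_pcid(unsigned int asid)
{
        assert(asid <= MAX_ASID_AVAILABLE);               /* fits the PCID space */
        assert(!(asid & (1u << CR3_PTI_PCID_USER_BIT)));  /* PTI user bit must be clear */

        /* Bias by one so that an ASID of zero does not land on PCID zero. */
        return asid + 1;
}

int main(void)
{
        assert(encode_kern_pcid(0) == 1);
        assert(encode_kern_pcid(5) == 6);
        return 0;
}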
|
  pgtable.c
    523  VM_WARN_ON_ONCE(!pmd_present(*pmdp));  in pmdp_invalidate_ad()
    538  VM_WARN_ON_ONCE(!pud_present(*pudp));  in pudp_invalidate()
    831  VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&  in arch_check_zapped_pte()
    838  VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&  in arch_check_zapped_pmd()
    845  VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));  in arch_check_zapped_pud()
|
/linux/arch/riscv/mm/
  pgtable.c
    159  VM_WARN_ON_ONCE(!pud_present(*pudp));  in pudp_invalidate()
|
/linux/arch/powerpc/mm/book3s64/
  pgtable.c
    184  VM_WARN_ON_ONCE(!pmd_present(*pmdp));  in pmdp_invalidate()
    195  VM_WARN_ON_ONCE(!pud_present(*pudp));  in pudp_invalidate()
|
/linux/arch/powerpc/include/asm/
  mmu_context.h
    127  VM_WARN_ON_ONCE(atomic_read(&mm->context.active_cpus) <= 0);  in dec_mm_active_cpus()
|