Lines matching refs: mm (identifier references in kernel/fork.c)

559 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)  in dup_mm_exe_file()  argument
564 RCU_INIT_POINTER(mm->exe_file, exe_file); in dup_mm_exe_file()
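
dup_mm_exe_file() (lines 559-564) takes a reference on the parent's exe_file and installs it in the child with RCU_INIT_POINTER() rather than rcu_assign_pointer(): the freshly duplicated mm is not yet visible to any RCU reader, so no publish barrier is needed. A userspace analogue of that initialization, using a relaxed C11 atomic store (the names here are hypothetical; the publish side with release ordering appears in the replace_mm_exe_file() sketch further down):

    #include <stdatomic.h>

    struct file;                            /* opaque stand-in */
    static _Atomic(struct file *) child_exe_file;

    /* The child mm is still private here, so a relaxed store is
     * enough; this mirrors RCU_INIT_POINTER() on an unpublished
     * object, where rcu_assign_pointer() would be overkill. */
    static void init_child_exe_file(struct file *f)
    {
            atomic_store_explicit(&child_exe_file, f, memory_order_relaxed);
    }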
574 static inline int mm_alloc_pgd(struct mm_struct *mm) in mm_alloc_pgd() argument
576 mm->pgd = pgd_alloc(mm); in mm_alloc_pgd()
577 if (unlikely(!mm->pgd)) in mm_alloc_pgd()
582 static inline void mm_free_pgd(struct mm_struct *mm) in mm_free_pgd() argument
584 pgd_free(mm, mm->pgd); in mm_free_pgd()
587 #define mm_alloc_pgd(mm) (0) argument
588 #define mm_free_pgd(mm) argument
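
Lines 574-588 show the usual CONFIG_MMU split: with an MMU, mm_alloc_pgd() wraps pgd_alloc() and fails when it returns NULL, while mm_free_pgd() hands the table back via pgd_free(); without an MMU, both helpers collapse into no-op macros so mm_init() and __mmdrop() need no #ifdefs at the call sites. The same pattern in a self-contained sketch (HAVE_MMU and the 4096-byte allocation are illustrative assumptions, not the kernel's symbols):

    #include <stdlib.h>

    #ifdef HAVE_MMU
    /* Real implementation: allocate and free a top-level table. */
    static int mm_alloc_pgd_sketch(void **pgd)
    {
            *pgd = calloc(1, 4096);         /* stands in for pgd_alloc() */
            return *pgd ? 0 : -1;           /* -ENOMEM in the real code */
    }
    static void mm_free_pgd_sketch(void *pgd)
    {
            free(pgd);                      /* stands in for pgd_free() */
    }
    #else
    /* !MMU: the same call sites compile to nothing. */
    #define mm_alloc_pgd_sketch(pgd) (0)
    #define mm_free_pgd_sketch(pgd)
    #endif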
594 static inline int mm_alloc_id(struct mm_struct *mm) in mm_alloc_id() argument
601 mm->mm_id = ret; in mm_alloc_id()
605 static inline void mm_free_id(struct mm_struct *mm) in mm_free_id() argument
607 const mm_id_t id = mm->mm_id; in mm_free_id()
609 mm->mm_id = MM_ID_DUMMY; in mm_free_id()
617 static inline int mm_alloc_id(struct mm_struct *mm) { return 0; } in mm_alloc_id() argument
618 static inline void mm_free_id(struct mm_struct *mm) {} in mm_free_id() argument
621 static void check_mm(struct mm_struct *mm) in check_mm() argument
629 long x = percpu_counter_sum(&mm->rss_stat[i]); in check_mm()
633 mm, resident_page_types[i], x, in check_mm()
639 if (mm_pgtables_bytes(mm)) in check_mm()
641 mm_pgtables_bytes(mm)); in check_mm()
644 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); in check_mm()
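
check_mm() (lines 621-644) is a final consistency pass run during __mmdrop(): line 629 sums each per-mm RSS counter across CPUs and line 639 checks that no page-table bytes remain; anything nonzero means pages were leaked or double-accounted, and pmd_huge_pte must likewise be NULL by now. A minimal userspace model of the counter check (the four names match the kernel's MM_* counter enum; the rest is hypothetical):

    #include <stdio.h>

    #define NR_MM_COUNTERS 4

    static const char *const page_types[NR_MM_COUNTERS] = {
            "MM_FILEPAGES", "MM_ANONPAGES", "MM_SWAPENTS", "MM_SHMEMPAGES",
    };

    /* Complain about any counter that did not drain to zero. */
    static void check_counters(const long rss_stat[NR_MM_COUNTERS])
    {
            for (int i = 0; i < NR_MM_COUNTERS; i++)
                    if (rss_stat[i])
                            fprintf(stderr, "BUG: non-zero %s: %ld\n",
                                    page_types[i], rss_stat[i]);
    }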
649 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) argument
653 struct mm_struct *mm = arg; in do_check_lazy_tlb() local
655 WARN_ON_ONCE(current->active_mm == mm); in do_check_lazy_tlb()
660 struct mm_struct *mm = arg; in do_shoot_lazy_tlb() local
662 if (current->active_mm == mm) { in do_shoot_lazy_tlb()
663 WARN_ON_ONCE(current->mm); in do_shoot_lazy_tlb()
665 switch_mm(mm, &init_mm, current); in do_shoot_lazy_tlb()
669 static void cleanup_lazy_tlbs(struct mm_struct *mm) in cleanup_lazy_tlbs() argument
707 on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1); in cleanup_lazy_tlbs()
709 on_each_cpu(do_check_lazy_tlb, (void *)mm, 1); in cleanup_lazy_tlbs()
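
The do_shoot_lazy_tlb()/do_check_lazy_tlb() pair handles lazy-TLB users: before a dying mm's page tables can be freed, line 707 runs do_shoot_lazy_tlb() by IPI on every CPU in mm_cpumask(), switching any CPU that still has the mm as its lazy active_mm over to init_mm, and line 709 can then sweep all CPUs to verify none remain. A userspace model of the shoot-down (mock types are hypothetical, and a plain loop stands in for the cross-CPU IPI):

    #include <stddef.h>

    struct mock_mm { int id; };
    struct mock_cpu { struct mock_mm *active_mm; };

    /* Any CPU lazily holding @mm is switched to a stand-in for
     * init_mm, the switch_mm(mm, &init_mm, ...) analogue. */
    static void shoot_lazy_tlbs(struct mock_cpu *cpus, size_t n,
                                struct mock_mm *mm, struct mock_mm *init_mm)
    {
            for (size_t i = 0; i < n; i++)
                    if (cpus[i].active_mm == mm)
                            cpus[i].active_mm = init_mm;
    }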
717 void __mmdrop(struct mm_struct *mm) in __mmdrop() argument
719 BUG_ON(mm == &init_mm); in __mmdrop()
720 WARN_ON_ONCE(mm == current->mm); in __mmdrop()
723 cleanup_lazy_tlbs(mm); in __mmdrop()
725 WARN_ON_ONCE(mm == current->active_mm); in __mmdrop()
726 mm_free_pgd(mm); in __mmdrop()
727 mm_free_id(mm); in __mmdrop()
728 destroy_context(mm); in __mmdrop()
729 mmu_notifier_subscriptions_destroy(mm); in __mmdrop()
730 check_mm(mm); in __mmdrop()
731 put_user_ns(mm->user_ns); in __mmdrop()
732 mm_pasid_drop(mm); in __mmdrop()
733 mm_destroy_cid(mm); in __mmdrop()
734 percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS); in __mmdrop()
736 free_mm(mm); in __mmdrop()
742 struct mm_struct *mm; in mmdrop_async_fn() local
744 mm = container_of(work, struct mm_struct, async_put_work); in mmdrop_async_fn()
745 __mmdrop(mm); in mmdrop_async_fn()
748 static void mmdrop_async(struct mm_struct *mm) in mmdrop_async() argument
750 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { in mmdrop_async()
751 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); in mmdrop_async()
752 schedule_work(&mm->async_put_work); in mmdrop_async()
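
Lines 717-752 show the mm_count side of the two-level reference scheme: mm_users counts address-space users, while mm_count pins the struct mm_struct itself (for lazy-TLB CPUs, kthreads and the like). The inline mmdrop() performs the atomic_dec_and_test(); __mmdrop() is the once-only teardown that runs when it hits zero, and mmdrop_async() defers that teardown to a workqueue for callers in contexts where freeing is unsafe. A runnable userspace model of the final-drop logic (mock names are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mock_mm {
            atomic_int mm_count;            /* pins the struct itself */
    };

    /* __mmdrop() analogue: runs exactly once, on the last pin. */
    static void mock_mmdrop_final(struct mock_mm *mm)
    {
            printf("freeing mm\n");
            free(mm);
    }

    /* mmdrop() analogue: free on the 1 -> 0 transition. */
    static void mock_mmdrop(struct mock_mm *mm)
    {
            if (atomic_fetch_sub(&mm->mm_count, 1) == 1)
                    mock_mmdrop_final(mm);
    }

    int main(void)
    {
            struct mock_mm *mm = malloc(sizeof(*mm));
            atomic_init(&mm->mm_count, 2);  /* two pins held */
            mock_mmdrop(mm);                /* still pinned */
            mock_mmdrop(mm);                /* last drop: frees */
            return 0;
    }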
1029 static void mm_init_aio(struct mm_struct *mm) in mm_init_aio() argument
1032 spin_lock_init(&mm->ioctx_lock); in mm_init_aio()
1033 mm->ioctx_table = NULL; in mm_init_aio()
1037 static __always_inline void mm_clear_owner(struct mm_struct *mm, in mm_clear_owner() argument
1041 if (mm->owner == p) in mm_clear_owner()
1042 WRITE_ONCE(mm->owner, NULL); in mm_clear_owner()
1046 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) in mm_init_owner() argument
1049 mm->owner = p; in mm_init_owner()
1053 static void mm_init_uprobes_state(struct mm_struct *mm) in mm_init_uprobes_state() argument
1056 mm->uprobes_state.xol_area = NULL; in mm_init_uprobes_state()
1057 arch_uprobe_init_state(mm); in mm_init_uprobes_state()
1061 static void mmap_init_lock(struct mm_struct *mm) in mmap_init_lock() argument
1063 init_rwsem(&mm->mmap_lock); in mmap_init_lock()
1064 mm_lock_seqcount_init(mm); in mmap_init_lock()
1066 rcuwait_init(&mm->vma_writer_wait); in mmap_init_lock()
1070 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, in mm_init() argument
1073 mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); in mm_init()
1074 mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); in mm_init()
1075 atomic_set(&mm->mm_users, 1); in mm_init()
1076 atomic_set(&mm->mm_count, 1); in mm_init()
1077 seqcount_init(&mm->write_protect_seq); in mm_init()
1078 mmap_init_lock(mm); in mm_init()
1079 INIT_LIST_HEAD(&mm->mmlist); in mm_init()
1080 mm_pgtables_bytes_init(mm); in mm_init()
1081 mm->map_count = 0; in mm_init()
1082 mm->locked_vm = 0; in mm_init()
1083 atomic64_set(&mm->pinned_vm, 0); in mm_init()
1084 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); in mm_init()
1085 spin_lock_init(&mm->page_table_lock); in mm_init()
1086 spin_lock_init(&mm->arg_lock); in mm_init()
1087 mm_init_cpumask(mm); in mm_init()
1088 mm_init_aio(mm); in mm_init()
1089 mm_init_owner(mm, p); in mm_init()
1090 mm_pasid_init(mm); in mm_init()
1091 RCU_INIT_POINTER(mm->exe_file, NULL); in mm_init()
1092 mmu_notifier_subscriptions_init(mm); in mm_init()
1093 init_tlb_flush_pending(mm); in mm_init()
1095 mm->pmd_huge_pte = NULL; in mm_init()
1097 mm_init_uprobes_state(mm); in mm_init()
1098 hugetlb_count_init(mm); in mm_init()
1100 mm_flags_clear_all(mm); in mm_init()
1101 if (current->mm) { in mm_init()
1102 unsigned long flags = __mm_flags_get_word(current->mm); in mm_init()
1104 __mm_flags_overwrite_word(mm, mmf_init_legacy_flags(flags)); in mm_init()
1105 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; in mm_init()
1107 __mm_flags_overwrite_word(mm, default_dump_filter); in mm_init()
1108 mm->def_flags = 0; in mm_init()
1111 if (futex_mm_init(mm)) in mm_init()
1114 if (mm_alloc_pgd(mm)) in mm_init()
1117 if (mm_alloc_id(mm)) in mm_init()
1120 if (init_new_context(p, mm)) in mm_init()
1123 if (mm_alloc_cid(mm, p)) in mm_init()
1126 if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT, in mm_init()
1130 mm->user_ns = get_user_ns(user_ns); in mm_init()
1131 lru_gen_init_mm(mm); in mm_init()
1132 return mm; in mm_init()
1135 mm_destroy_cid(mm); in mm_init()
1137 destroy_context(mm); in mm_init()
1139 mm_free_id(mm); in mm_init()
1141 mm_free_pgd(mm); in mm_init()
1143 futex_hash_free(mm); in mm_init()
1145 free_mm(mm); in mm_init()
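
mm_init() (lines 1070-1145) first initializes every embedded lock, counter and pointer unconditionally, then acquires the failable resources in a fixed order: futex state (1111), the pgd (1114), the mm id (1117), the arch context (1120), the cid (1123) and the RSS counters (1126). The error labels from line 1135 down release them in exactly the reverse order, the kernel's standard goto-unwind idiom. A compilable sketch of that idiom with hypothetical resources:

    #include <stdlib.h>

    struct res { void *a, *b, *c; };

    /* Goto-based unwind: each failure point releases only what
     * was successfully acquired before it, in reverse order. */
    static int init_all(struct res *r)
    {
            r->a = malloc(16);
            if (!r->a)
                    goto fail_a;
            r->b = malloc(16);
            if (!r->b)
                    goto fail_b;
            r->c = malloc(16);
            if (!r->c)
                    goto fail_c;
            return 0;

    fail_c:
            free(r->b);
    fail_b:
            free(r->a);
    fail_a:
            return -1;
    }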
1154 struct mm_struct *mm; in mm_alloc() local
1156 mm = allocate_mm(); in mm_alloc()
1157 if (!mm) in mm_alloc()
1160 memset(mm, 0, sizeof(*mm)); in mm_alloc()
1161 return mm_init(mm, current, current_user_ns()); in mm_alloc()
1165 static inline void __mmput(struct mm_struct *mm) in __mmput() argument
1167 VM_BUG_ON(atomic_read(&mm->mm_users)); in __mmput()
1169 uprobe_clear_state(mm); in __mmput()
1170 exit_aio(mm); in __mmput()
1171 ksm_exit(mm); in __mmput()
1172 khugepaged_exit(mm); /* must run before exit_mmap */ in __mmput()
1173 exit_mmap(mm); in __mmput()
1174 mm_put_huge_zero_folio(mm); in __mmput()
1175 set_mm_exe_file(mm, NULL); in __mmput()
1176 if (!list_empty(&mm->mmlist)) { in __mmput()
1178 list_del(&mm->mmlist); in __mmput()
1181 if (mm->binfmt) in __mmput()
1182 module_put(mm->binfmt->module); in __mmput()
1183 lru_gen_del_mm(mm); in __mmput()
1184 futex_hash_free(mm); in __mmput()
1185 mmdrop(mm); in __mmput()
1191 void mmput(struct mm_struct *mm) in mmput() argument
1195 if (atomic_dec_and_test(&mm->mm_users)) in mmput()
1196 __mmput(mm); in mmput()
1203 struct mm_struct *mm = container_of(work, struct mm_struct, in mmput_async_fn() local
1206 __mmput(mm); in mmput_async_fn()
1209 void mmput_async(struct mm_struct *mm) in mmput_async() argument
1211 if (atomic_dec_and_test(&mm->mm_users)) { in mmput_async()
1212 INIT_WORK(&mm->async_put_work, mmput_async_fn); in mmput_async()
1213 schedule_work(&mm->async_put_work); in mmput_async()
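
__mmput() (lines 1165-1185) is the heavyweight half of the user-count pair: it runs only when mm_users reaches zero and tears the address space down in a deliberate order (note the comment at line 1172: khugepaged_exit() must run before exit_mmap()), finishing with mmdrop() to release the mm_count pin taken at creation. mmput() does the plain atomic_dec_and_test(), while mmput_async() hands the final put to a workqueue for callers that cannot run that teardown in their own context. A rough userspace analogue, with a detached thread standing in for schedule_work() (mock names are hypothetical):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct mock_mm { atomic_int mm_users; };

    static void mock_teardown(struct mock_mm *mm)
    {
            free(mm);                       /* the __mmput() analogue */
    }

    static void *async_put_fn(void *arg)
    {
            mock_teardown(arg);             /* heavy work, off-context */
            return NULL;
    }

    /* Last user queues the teardown instead of running it inline. */
    static void mock_mmput_async(struct mock_mm *mm)
    {
            if (atomic_fetch_sub(&mm->mm_users, 1) == 1) {
                    pthread_t t;
                    pthread_create(&t, NULL, async_put_fn, mm);
                    pthread_detach(t);
            }
    }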
1232 int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in set_mm_exe_file() argument
1241 old_exe_file = rcu_dereference_raw(mm->exe_file); in set_mm_exe_file()
1252 rcu_assign_pointer(mm->exe_file, new_exe_file); in set_mm_exe_file()
1269 int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in replace_mm_exe_file() argument
1276 old_exe_file = get_mm_exe_file(mm); in replace_mm_exe_file()
1278 VMA_ITERATOR(vmi, mm, 0); in replace_mm_exe_file()
1279 mmap_read_lock(mm); in replace_mm_exe_file()
1289 mmap_read_unlock(mm); in replace_mm_exe_file()
1301 mmap_write_lock(mm); in replace_mm_exe_file()
1302 old_exe_file = rcu_dereference_raw(mm->exe_file); in replace_mm_exe_file()
1303 rcu_assign_pointer(mm->exe_file, new_exe_file); in replace_mm_exe_file()
1304 mmap_write_unlock(mm); in replace_mm_exe_file()
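
Both exe_file setters publish with rcu_assign_pointer() (lines 1252 and 1303), replace_mm_exe_file() additionally under mmap_write_lock() after verifying under mmap_read_lock() that no VMA still maps the old file, so a concurrent get_mm_exe_file() always sees either the old or the new pointer and can take its own reference with get_file_rcu(). A userspace analogue of the publish-and-return-old step (reference counting elided; names hypothetical):

    #include <stdatomic.h>

    struct file;                            /* opaque stand-in */
    static _Atomic(struct file *) exe_file;

    /* Release ordering plays the role of rcu_assign_pointer();
     * the old pointer is returned for the caller to drop, like
     * the fput(old_exe_file) in the real code. */
    static struct file *swap_exe_file(struct file *new_file)
    {
            return atomic_exchange_explicit(&exe_file, new_file,
                                            memory_order_release);
    }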
1320 struct file *get_mm_exe_file(struct mm_struct *mm) in get_mm_exe_file() argument
1325 exe_file = get_file_rcu(&mm->exe_file); in get_mm_exe_file()
1341 struct mm_struct *mm; in get_task_exe_file() local
1347 mm = task->mm; in get_task_exe_file()
1348 if (mm) in get_task_exe_file()
1349 exe_file = get_mm_exe_file(mm); in get_task_exe_file()
1366 struct mm_struct *mm; in get_task_mm() local
1372 mm = task->mm; in get_task_mm()
1373 if (mm) in get_task_mm()
1374 mmget(mm); in get_task_mm()
1376 return mm; in get_task_mm()
1380 static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode) in may_access_mm() argument
1382 if (mm == current->mm) in may_access_mm()
1393 struct mm_struct *mm; in mm_access() local
1400 mm = get_task_mm(task); in mm_access()
1401 if (!mm) { in mm_access()
1402 mm = ERR_PTR(-ESRCH); in mm_access()
1403 } else if (!may_access_mm(mm, task, mode)) { in mm_access()
1404 mmput(mm); in mm_access()
1405 mm = ERR_PTR(-EACCES); in mm_access()
1409 return mm; in mm_access()
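
get_task_mm() (lines 1366-1376) returns the task's mm with mm_users raised via mmget(), or NULL when the task has no user address space; mm_access() (lines 1393-1409) layers a permission check on top, mapping a missing mm to -ESRCH and a failed check to -EACCES (may_access_mm() short-circuits when the mm is the caller's own). The canonical caller pattern, as a hedged kernel-context sketch (uses only the functions listed above; not standalone-buildable):

    /* Pin another task's mm, use it, then release the pin. */
    struct mm_struct *mm = get_task_mm(task);
    if (mm) {
            /* ... mm_users keeps the address space alive here ... */
            mmput(mm);
    }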
1458 static void mm_release(struct task_struct *tsk, struct mm_struct *mm) in mm_release() argument
1463 deactivate_mm(tsk, mm); in mm_release()
1471 if (atomic_read(&mm->mm_users) > 1) { in mm_release()
1491 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exit_mm_release() argument
1494 mm_release(tsk, mm); in exit_mm_release()
1497 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exec_mm_release() argument
1500 mm_release(tsk, mm); in exec_mm_release()
1516 struct mm_struct *mm; in dup_mm() local
1519 mm = allocate_mm(); in dup_mm()
1520 if (!mm) in dup_mm()
1523 memcpy(mm, oldmm, sizeof(*mm)); in dup_mm()
1525 if (!mm_init(mm, tsk, mm->user_ns)) in dup_mm()
1529 err = dup_mmap(mm, oldmm); in dup_mm()
1534 mm->hiwater_rss = get_mm_rss(mm); in dup_mm()
1535 mm->hiwater_vm = mm->total_vm; in dup_mm()
1537 if (mm->binfmt && !try_module_get(mm->binfmt->module)) in dup_mm()
1540 return mm; in dup_mm()
1544 mm->binfmt = NULL; in dup_mm()
1545 mm_init_owner(mm, NULL); in dup_mm()
1546 mmput(mm); in dup_mm()
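
dup_mm() (lines 1516-1546) duplicates by memcpy()ing the whole parent mm_struct (line 1523), then re-running mm_init() (line 1525) to reset everything that must not be shared (locks, counters, the pgd), before dup_mmap() (line 1529) copies the VMA tree and the hiwater marks are reset from the fresh copy. A compact model of that copy-then-reinitialize idiom (mock types are hypothetical):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct mock_mm {
            long total_vm;          /* plain field: inherited by the copy */
            pthread_mutex_t lock;   /* must NOT be inherited */
    };

    static struct mock_mm *dup_mock_mm(const struct mock_mm *oldmm)
    {
            struct mock_mm *mm = malloc(sizeof(*mm));

            if (!mm)
                    return NULL;
            memcpy(mm, oldmm, sizeof(*mm));         /* bulk inherit */
            pthread_mutex_init(&mm->lock, NULL);    /* reinit private state */
            return mm;
    }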
1556 struct mm_struct *mm, *oldmm; in copy_mm() local
1565 tsk->mm = NULL; in copy_mm()
1573 oldmm = current->mm; in copy_mm()
1579 mm = oldmm; in copy_mm()
1581 mm = dup_mm(tsk, current->mm); in copy_mm()
1582 if (!mm) in copy_mm()
1586 tsk->mm = mm; in copy_mm()
1587 tsk->active_mm = mm; in copy_mm()
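
copy_mm() (lines 1556-1587) makes the CLONE_VM decision: a thread shares the parent's mm (line 1579 keeps mm = oldmm after bumping mm_users), while a plain fork duplicates it through dup_mm() (line 1581); either way the result becomes both tsk->mm and tsk->active_mm. A hedged sketch of the branch (CLONE_VM's value matches the kernel's flag; dup_mock_mm2() stands for a full duplication like the dup_mm sketch above):

    #define CLONE_VM 0x00000100UL

    struct mock_mm2 { int mm_users; };

    struct mock_mm2 *dup_mock_mm2(const struct mock_mm2 *oldmm);

    /* Threads (CLONE_VM) share the parent's mm; fork duplicates it. */
    static struct mock_mm2 *copy_mock_mm(unsigned long clone_flags,
                                         struct mock_mm2 *oldmm)
    {
            if (clone_flags & CLONE_VM) {
                    oldmm->mm_users++;              /* mmget() analogue */
                    return oldmm;
            }
            return dup_mock_mm2(oldmm);             /* fork path */
    }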
1926 if (!tsk->mm) in copy_oom_score_adj()
1935 mm_flags_set(MMF_MULTIPROCESS, tsk->mm); in copy_oom_score_adj()
2498 if (p->mm) { in copy_process()
2500 mm_clear_owner(p->mm, p); in copy_process()
2501 mmput(p->mm); in copy_process()
2678 lru_gen_add_mm(p->mm); in kernel_clone()