Lines matching the full identifier mm:
24 void __check_vmalloc_seq(struct mm_struct *mm);
27 static inline void check_vmalloc_seq(struct mm_struct *mm)    [in check_vmalloc_seq(); argument]
30 unlikely(atomic_read(&mm->context.vmalloc_seq) !=    [in check_vmalloc_seq()]
32 __check_vmalloc_seq(mm);    [in check_vmalloc_seq()]
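Lines 27, 30 and 32 are fragments of a single small helper: the per-mm vmalloc sequence counter is compared against init_mm's, and only on a mismatch is the out-of-line resync routine called. A minimal sketch of how the fragments plausibly fit together; the comparison against init_mm and the CONFIG_ARM_LPAE guard are assumptions, not part of the matched lines:

/* Sketch only: reconstructed from the matched fragments plus assumptions. */
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
        /* Assumed guard: on LPAE the kernel half of the page tables is not
         * copied per-mm, so no resync should be needed there. */
        if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
            unlikely(atomic_read(&mm->context.vmalloc_seq) !=
                     atomic_read(&init_mm.context.vmalloc_seq)))
                __check_vmalloc_seq(mm);        /* pull in new vmalloc mappings */
}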
38 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
42 init_new_context(struct task_struct *tsk, struct mm_struct *mm)    [in init_new_context(); argument]
44 atomic64_set(&mm->context.id, 0);    [in init_new_context()]
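Lines 38, 42 and 44 look like the ASID-capable side of this header: check_and_switch_context() is a real out-of-line function, and a fresh mm starts with context.id set to 0 so that the first switch allocates an ASID. A sketch of that arrangement, with the CONFIG_CPU_HAS_ASID guard and the int return convention assumed rather than taken from the matches:

#ifdef CONFIG_CPU_HAS_ASID                      /* assumed guard */

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        /* id == 0 means "no ASID assigned yet"; the first context switch
         * hands out a real one. */
        atomic64_set(&mm->context.id, 0);
        return 0;                               /* assumed return convention */
}

#endif /* CONFIG_CPU_HAS_ASID */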
49 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
52 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,    [in a15_erratum_get_cpumask(); argument]
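Lines 49 and 52 show the same function twice, once as a plain declaration and once as a static inline with the same parameters, which is the usual shape of an erratum workaround that compiles down to an empty stub when the option is off. A sketch of that pattern; the CONFIG_ARM_ERRATA_798181 name and the trailing cpumask_t *mask parameter are assumptions, since both are elided from the matched lines:

#ifdef CONFIG_ARM_ERRATA_798181                 /* assumed option name */
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask);  /* assumed third parameter */
#else
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                                           cpumask_t *mask)
{
        /* erratum workaround not configured: nothing to collect */
}
#endif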
62 static inline void check_and_switch_context(struct mm_struct *mm,    [in check_and_switch_context(); argument]
65 check_vmalloc_seq(mm);    [in check_and_switch_context()]
71  * running with the old mm. Since we only support UP systems    [in check_and_switch_context()]
72  * on non-ASID CPUs, the old mm will remain valid until the    [in check_and_switch_context()]
75 mm->context.switch_pending = 1;    [in check_and_switch_context()]
77 cpu_switch_mm(mm->pgd, mm);    [in check_and_switch_context()]
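Lines 62 to 77 belong to the non-ASID variant of check_and_switch_context(). The comment fragments say the old mm stays valid because only UP systems are supported on non-ASID CPUs, so the actual cpu_switch_mm() call can be deferred: switch_pending is set when the switch cannot be done right away, and the switch happens immediately otherwise. A sketch of that shape; the irqs_disabled() test and the if/else structure are inferred from the fragments, not copied from the file:

static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        check_vmalloc_seq(mm);

        if (irqs_disabled())
                /* Assumed rationale: defer the expensive switch and keep
                 * running with the old mm; finish_arch_post_lock_switch()
                 * completes it later. */
                mm->context.switch_pending = 1;
        else
                cpu_switch_mm(mm->pgd, mm);
}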
85 struct mm_struct *mm = current->mm;    [in finish_arch_post_lock_switch(); local]
87 if (mm && mm->context.switch_pending) {    [in finish_arch_post_lock_switch()]
92  * switch to this mm was already done.    [in finish_arch_post_lock_switch()]
95 if (mm->context.switch_pending) {    [in finish_arch_post_lock_switch()]
96 mm->context.switch_pending = 0;    [in finish_arch_post_lock_switch()]
97 cpu_switch_mm(mm->pgd, mm);    [in finish_arch_post_lock_switch()]
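Lines 85 to 97 are the completion side of that deferral: once the scheduler has dropped its locks, a pending switch is carried out. The switch_pending flag is tested twice, which suggests the second test runs with preemption disabled so that a switch already performed by a preempting task is not repeated. A sketch along those lines; the preempt_disable()/preempt_enable() pair is an assumption, not part of the matches:

static inline void finish_arch_post_lock_switch(void)
{
        struct mm_struct *mm = current->mm;

        if (mm && mm->context.switch_pending) {
                /* Assumed: re-check with preemption disabled, so that a
                 * switch to this mm that was already done is not repeated. */
                preempt_disable();
                if (mm->context.switch_pending) {
                        mm->context.switch_pending = 0;
                        cpu_switch_mm(mm->pgd, mm);
                }
                preempt_enable();
        }
}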
111 * This is the actual mm switch as far as the scheduler
113 * calling the CPU specific function when the mm hasn't
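The comment fragments at lines 111 and 113 appear to sit above switch_mm(): it is the mm switch the scheduler sees, and the CPU-specific switch is skipped when the mm has not changed. A deliberately reduced sketch of just that guard (the real function also does cache maintenance and mm_cpumask bookkeeping that the matches do not show):

/* Reduced sketch: only the "skip when the mm is unchanged" idea. */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (prev != next)
                check_and_switch_context(next, tsk);
}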
142 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)    [in enter_lazy_tlb(); argument]
144 if (mm != &init_mm)    [in enter_lazy_tlb()]
145 check_vmalloc_seq(mm);    [in enter_lazy_tlb()]