Lines matching full:mm (whole-word occurrences of the identifier mm) in kernel/sched/membarrier.c. Each entry gives the source line number, the matched line, and, where available, the enclosing function.
91 * b: read rq->curr->mm == NULL
112 * e: current->mm = NULL
113 * b: read rq->curr->mm == NULL
126 * e: current->mm = NULL
127 * b: read rq->curr->mm == NULL
129 * f: current->mm = mm
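
The fragments above (source lines 91-129) come from the memory-ordering scenarios in the header comment of kernel/sched/membarrier.c: a CPU switches between a user thread and a kernel thread, clearing and restoring current->mm (the events labelled "e" and "f"), while a concurrent membarrier scan reads rq->curr->mm (event "b") to decide whether that CPU needs an IPI. A minimal sketch of that interleaving, assuming illustrative function names that are not kernel APIs and eliding the pairing barriers:

    /* CPU 0: scheduler side, switching to and from a kernel thread. */
    static void switch_to_kthread_sketch(struct mm_struct *mm)
    {
        current->mm = NULL;   /* e: kthread borrows the CPU, no user mm */
        /* ... kernel thread runs ... */
        current->mm = mm;     /* f: back to a user thread of @mm */
    }

    /* CPU 1: membarrier side, scanning runqueues. */
    static bool cpu_needs_ipi_sketch(int cpu)
    {
        /* b: a NULL mm means a kthread is running; no IPI needed. */
        return rcu_dereference(cpu_rq(cpu)->curr)->mm != NULL;
    }
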
216 struct mm_struct *mm = (struct mm_struct *) info; in ipi_sync_rq_state() local
218 if (current->mm != mm) in ipi_sync_rq_state()
221 atomic_read(&mm->membarrier_state)); in ipi_sync_rq_state()
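
Source lines 216-221 are the body of ipi_sync_rq_state(), the IPI handler used when a runqueue's cached membarrier state must be resynchronized with its mm. A plausible reconstruction from the matched lines (the smp_mb() placement is an assumption based on the file's surrounding comments):

    static void ipi_sync_rq_state(void *info)
    {
        struct mm_struct *mm = (struct mm_struct *) info;

        /* Only touch this CPU's state if it runs a thread of @mm. */
        if (current->mm != mm)
            return;
        this_cpu_write(runqueues.membarrier_state,
                       atomic_read(&mm->membarrier_state));
        /* Order the state update against later memory accesses. */
        smp_mb();
    }
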
231 void membarrier_exec_mmap(struct mm_struct *mm) in membarrier_exec_mmap() argument
239 atomic_set(&mm->membarrier_state, 0); in membarrier_exec_mmap()
241 * Keep the runqueue membarrier_state in sync with this mm in membarrier_exec_mmap()
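
Lines 231-241 belong to membarrier_exec_mmap(), called on exec() when the task gets a fresh address space; membarrier registrations do not survive exec, so both the mm state and the current runqueue's cached copy are cleared. A hedged reconstruction:

    void membarrier_exec_mmap(struct mm_struct *mm)
    {
        /* Order memory accesses prior to exec against the clearing. */
        smp_mb();
        atomic_set(&mm->membarrier_state, 0);
        /*
         * Keep the runqueue membarrier_state in sync with this mm
         * membarrier_state: exec resets both to zero.
         */
        this_cpu_write(runqueues.membarrier_state, 0);
    }
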
299 * a task mm. in membarrier_global_expedited()
302 if (!p->mm) in membarrier_global_expedited()
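
Lines 299-302 are membarrier_global_expedited()'s per-CPU filter: a CPU whose current task has no mm is running a kernel thread, so no user-space thread there can need the barrier. A sketch of the loop body, with locking and the runqueue-state check elided:

    /* Sketch of the runqueue scan in membarrier_global_expedited(). */
    for_each_online_cpu(cpu) {
        struct task_struct *p;

        /*
         * Skip the CPU if it runs a kernel thread which is not using
         * a task mm: there is no user thread to order.
         */
        p = rcu_dereference(cpu_rq(cpu)->curr);
        if (!p->mm)
            continue;

        __cpumask_set_cpu(cpu, tmpmask);  /* this CPU gets an IPI */
    }
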
328 struct mm_struct *mm = current->mm; in membarrier_private_expedited() local
334 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
338 prepare_sync_core_cmd(mm); in membarrier_private_expedited()
342 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
348 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
354 (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)) in membarrier_private_expedited()
380 if (!p || p->mm != mm) { in membarrier_private_expedited()
393 if (p && p->mm == mm) in membarrier_private_expedited()
415 * task in the same mm just before, during, or after in membarrier_private_expedited()
416 * membarrier, we will end up with some thread in the mm in membarrier_private_expedited()
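
Lines 328-416 are from membarrier_private_expedited(): the command fails unless the calling mm first registered the matching MEMBARRIER_STATE_PRIVATE_EXPEDITED* bit (the checks on lines 334-348), and a single-threaded mm (line 354) can skip the IPIs entirely. From user space the register/use pairing looks like this (runnable; the membarrier() wrapper is local to the example, since glibc provides none):

    #define _GNU_SOURCE
    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    static long membarrier(int cmd, unsigned int flags, int cpu_id)
    {
        return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
        /* Registration sets MEMBARRIER_STATE_PRIVATE_EXPEDITED* in
         * mm->membarrier_state, which the kernel-side checks test. */
        if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0)) {
            perror("register");
            return 1;
        }
        /* Each call IPIs only the CPUs running threads of this mm. */
        if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
            perror("membarrier");
            return 1;
        }
        puts("private expedited membarrier completed");
        return 0;
    }
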
447 static int sync_runqueues_membarrier_state(struct mm_struct *mm) in sync_runqueues_membarrier_state() argument
449 int membarrier_state = atomic_read(&mm->membarrier_state); in sync_runqueues_membarrier_state()
453 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { in sync_runqueues_membarrier_state()
457 * For single mm user, we can simply issue a memory barrier in sync_runqueues_membarrier_state()
459 * mm and in the current runqueue to guarantee that no memory in sync_runqueues_membarrier_state()
471 * For mm with multiple users, we need to ensure all future in sync_runqueues_membarrier_state()
472 * scheduler executions will observe @mm's new membarrier in sync_runqueues_membarrier_state()
478 * For each cpu runqueue, if the task's mm matches @mm, ensure that all in sync_runqueues_membarrier_state()
479 * @mm's membarrier state set bits are also set in the runqueue's in sync_runqueues_membarrier_state()
481 * between threads which are users of @mm has its membarrier state in sync_runqueues_membarrier_state()
492 if (p && p->mm == mm) in sync_runqueues_membarrier_state()
497 on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true); in sync_runqueues_membarrier_state()
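
Lines 447-497 outline sync_runqueues_membarrier_state(), which the registration paths call to propagate a new mm->membarrier_state to every runqueue that may be running a thread of @mm. A condensed reconstruction, hedged where the matched lines leave gaps (cpumask allocation and locking follow the usual kernel patterns):

    static int sync_runqueues_membarrier_state(struct mm_struct *mm)
    {
        int membarrier_state = atomic_read(&mm->membarrier_state);
        cpumask_var_t tmpmask;
        int cpu;

        /* Fast path: single user or single CPU, a barrier suffices. */
        if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
            this_cpu_write(runqueues.membarrier_state, membarrier_state);
            smp_mb();
            return 0;
        }

        if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
            return -ENOMEM;

        /* Ensure future scheduler executions observe the new state. */
        synchronize_rcu();

        /* Collect the CPUs currently running a thread of @mm ... */
        cpus_read_lock();
        rcu_read_lock();
        for_each_online_cpu(cpu) {
            struct task_struct *p = rcu_dereference(cpu_rq(cpu)->curr);

            if (p && p->mm == mm)
                __cpumask_set_cpu(cpu, tmpmask);
        }
        rcu_read_unlock();

        /* ... and resync their runqueues via ipi_sync_rq_state(). */
        on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);

        free_cpumask_var(tmpmask);
        cpus_read_unlock();
        return 0;
    }
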
508 struct mm_struct *mm = p->mm; in membarrier_register_global_expedited() local
511 if (atomic_read(&mm->membarrier_state) & in membarrier_register_global_expedited()
514 atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state); in membarrier_register_global_expedited()
515 ret = sync_runqueues_membarrier_state(mm); in membarrier_register_global_expedited()
519 &mm->membarrier_state); in membarrier_register_global_expedited()
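
Lines 508-519 show membarrier_register_global_expedited()'s two-phase update: set MEMBARRIER_STATE_GLOBAL_EXPEDITED, sync every relevant runqueue, and only then set the *_READY bit, so a concurrent MEMBARRIER_CMD_GLOBAL_EXPEDITED caller can never observe a half-registered mm. A hedged reconstruction:

    static int membarrier_register_global_expedited(void)
    {
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
        int ret;

        if (atomic_read(&mm->membarrier_state) &
            MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
            return 0;  /* already registered */
        atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
        ret = sync_runqueues_membarrier_state(mm);
        if (ret)
            return ret;
        /* Publish readiness only once all runqueues are in sync. */
        atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
                  &mm->membarrier_state);
        return 0;
    }
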
527 struct mm_struct *mm = p->mm; in membarrier_register_private_expedited() local
548 * groups, which use the same mm. (CLONE_VM but not in membarrier_register_private_expedited()
551 if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state) in membarrier_register_private_expedited()
557 atomic_or(set_state, &mm->membarrier_state); in membarrier_register_private_expedited()
558 ret = sync_runqueues_membarrier_state(mm); in membarrier_register_private_expedited()
561 atomic_or(ready_state, &mm->membarrier_state); in membarrier_register_private_expedited()
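
Lines 527-561 apply the same set-then-ready pattern per flavor in membarrier_register_private_expedited(); the comment at line 548 explains why the state lives in the mm rather than the thread group: CLONE_VM siblings outside the thread group share it. A runnable user-space check that probes kernel support before registering the sync-core flavor (the one line 338's prepare_sync_core_cmd() serves):

    #define _GNU_SOURCE
    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);

        if (cmds < 0) {
            perror("MEMBARRIER_CMD_QUERY");
            return 1;
        }
        if (cmds & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE) {
            if (syscall(__NR_membarrier,
                        MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
                        0, 0)) {
                perror("register sync-core");
                return 1;
            }
            puts("sync-core flavor registered for this mm");
        }
        return 0;
    }
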
569 struct mm_struct *mm = p->mm; in membarrier_get_registrations() local
589 membarrier_state = atomic_read(&mm->membarrier_state); in membarrier_get_registrations()
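
Lines 569-589 are from membarrier_get_registrations(), which backs the newer MEMBARRIER_CMD_GET_REGISTRATIONS command (Linux 6.3+): it reads mm->membarrier_state and reports which MEMBARRIER_CMD_REGISTER_* registrations the caller's mm holds. Minimal user-space usage, assuming headers and a kernel new enough to define the command:

    #define _GNU_SOURCE
    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        long regs = syscall(__NR_membarrier,
                            MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);

        if (regs < 0) {
            perror("MEMBARRIER_CMD_GET_REGISTRATIONS"); /* EINVAL pre-6.3 */
            return 1;
        }
        if (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
            puts("registered: private expedited");
        if (regs & MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED)
            puts("registered: global expedited");
        return 0;
    }
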