Lines Matching +full:mm +full:-0 (kernel/sched/membarrier.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
17 * int x = 0, y = 0;
22 * CPU1 after the IPI-induced memory barrier:
29 * b: send IPI IPI-induced mb
36 * BUG_ON(r1 == 0 && r2 == 0)
41 * can be reordered after (a) (although not after (c)), so we get r1 == 0
42 * and r2 == 0. This violates the guarantee that membarrier() is
46 * before the IPI-induced memory barrier on CPU1.
53 * int x = 0, y = 0;
68 * b: send IPI IPI-induced mb
71 * BUG_ON(r1 == 0 && r2 == 1)
76 * before (b) (although not before (a)), so we get "r1 = 0". This violates
80 * after the IPI-induced memory barrier on CPU1.
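The scenario A and B fragments above document the two directions of membarrier()'s guarantee: the caller's memory barrier before sending the IPIs (A) and after they complete (B) pair with plain compiler barriers on the other threads. Below is a minimal userspace sketch of that asymmetric fast-path/slow-path pattern, similar in spirit to the membarrier(2) man page example; the wrapper, variable names and the choice of the private expedited command are illustrative, not taken from this file.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static volatile int x, y;		/* the "int x = 0, y = 0" above */
static int r1, r2;

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

/* CPU1 side: runs often, pays only a compiler barrier. */
static void fast_path(void)
{
	y = 1;
	__asm__ __volatile__("" ::: "memory");	/* barrier(), no fence insn */
	r1 = x;
}

/* CPU0 side: runs rarely and calls membarrier().  Scenario A makes the
 * store to x visible to code after the IPI-induced barrier on the other
 * CPU; scenario B makes the other CPU's store to y visible here once
 * membarrier() returns. */
static void slow_path(void)
{
	x = 1;
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
	r2 = y;
}

int main(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		perror("register private expedited");

	/* Demo runs the two paths sequentially; in real use they run on
	 * different threads of the same process, and the documented
	 * guarantee is exactly the BUG_ON above: r1 == 0 && r2 == 0
	 * cannot happen. */
	fast_path();
	slow_path();
	printf("r1=%d r2=%d\n", r1, r2);
	return 0;
}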
82 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
89 * b: read rq->curr->mm == NULL
110 * e: current->mm = NULL
111 * b: read rq->curr->mm == NULL
124 * e: current->mm = NULL
125 * b: read rq->curr->mm == NULL
127 * f: current->mm = mm
145 #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
153 #define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
178 * become visible to membarrier()'s caller -- see scenario B in in ipi_sync_core()
205 struct mm_struct *mm = (struct mm_struct *) info; in ipi_sync_rq_state() local
207 if (current->mm != mm) in ipi_sync_rq_state()
210 atomic_read(&mm->membarrier_state)); in ipi_sync_rq_state()
220 void membarrier_exec_mmap(struct mm_struct *mm) in membarrier_exec_mmap() argument
228 atomic_set(&mm->membarrier_state, 0); in membarrier_exec_mmap()
230 * Keep the runqueue membarrier_state in sync with this mm in membarrier_exec_mmap()
233 this_cpu_write(runqueues.membarrier_state, 0); in membarrier_exec_mmap()
239 int membarrier_state = 0; in membarrier_update_current_mm()
242 membarrier_state = atomic_read(&next_mm->membarrier_state); in membarrier_update_current_mm()
243 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_update_current_mm()
245 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_update_current_mm()
254 return 0; in membarrier_global_expedited()
257 * Matches memory barriers after rq->curr modification in in membarrier_global_expedited()
263 return -ENOMEM; in membarrier_global_expedited()
282 if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) & in membarrier_global_expedited()
288 * a task mm. in membarrier_global_expedited()
290 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
291 if (!p->mm) in membarrier_global_expedited()
308 * rq->curr modification in scheduler. in membarrier_global_expedited()
311 return 0; in membarrier_global_expedited()
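membarrier_global_expedited() above only IPIs CPUs whose runqueue has MEMBARRIER_STATE_GLOBAL_EXPEDITED set, i.e. CPUs currently running a thread of a process that registered for global expedited membarriers. A hedged userspace sketch of both halves of that contract (not taken from this file):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

int main(void)
{
	/* Opt in: from now on, MEMBARRIER_CMD_GLOBAL_EXPEDITED issued by
	 * any process is guaranteed to cover this process's running
	 * threads (its runqueues get MEMBARRIER_STATE_GLOBAL_EXPEDITED). */
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0, 0))
		perror("register global expedited");

	/* Issue one: acts as a full memory barrier on the running
	 * threads of every registered process, then returns. */
	if (syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0, 0))
		perror("global expedited");

	return 0;
}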
317 struct mm_struct *mm = current->mm; in membarrier_private_expedited() local
322 return -EINVAL; in membarrier_private_expedited()
323 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
325 return -EPERM; in membarrier_private_expedited()
327 prepare_sync_core_cmd(mm); in membarrier_private_expedited()
330 return -EINVAL; in membarrier_private_expedited()
331 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
333 return -EPERM; in membarrier_private_expedited()
337 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
339 return -EPERM; in membarrier_private_expedited()
343 (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)) in membarrier_private_expedited()
344 return 0; in membarrier_private_expedited()
347 * Matches memory barriers after rq->curr modification in in membarrier_private_expedited()
350 * On RISC-V, this barrier pairing is also needed for the in membarrier_private_expedited()
356 if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) in membarrier_private_expedited()
357 return -ENOMEM; in membarrier_private_expedited()
362 if (cpu_id >= 0) { in membarrier_private_expedited()
368 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
369 if (!p || p->mm != mm) { in membarrier_private_expedited()
381 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
382 if (p && p->mm == mm) in membarrier_private_expedited()
388 if (cpu_id >= 0) { in membarrier_private_expedited()
397 * skipping the current cpu -- we're about to do smp_mb() in membarrier_private_expedited()
402 * For SYNC_CORE, we do need a barrier on the current cpu -- in membarrier_private_expedited()
404 * task in the same mm just before, during, or after in membarrier_private_expedited()
405 * membarrier, we will end up with some thread in the mm in membarrier_private_expedited()
422 if (cpu_id < 0) in membarrier_private_expedited()
429 * rq->curr modification in scheduler. in membarrier_private_expedited()
433 return 0; in membarrier_private_expedited()
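The EPERM/EINVAL checks and the cpu_id branches in membarrier_private_expedited() above translate into a register-then-use discipline for userspace. A hedged sketch, not from the kernel tree; the JIT framing and the target CPU value are illustrative, and MEMBARRIER_CMD_FLAG_CPU plus the SYNC_CORE command are only available where the kernel and architecture support them:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* One-time registrations; without them the expedited calls below
	 * fail with EPERM, matching the membarrier_state checks above.
	 * The SYNC_CORE registration fails with EINVAL on architectures
	 * without ARCH_HAS_MEMBARRIER_SYNC_CORE. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) ||
	    membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0))
		perror("membarrier registration");

	/* ... a JIT would rewrite some executable memory here ... */

	/* Make every thread of this mm serialize its instruction stream
	 * before it can run the rewritten code. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0))
		perror("private expedited sync core");

	/* Targeted variant: with MEMBARRIER_CMD_FLAG_CPU the kernel takes
	 * the cpu_id >= 0 branch above and IPIs only that CPU. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED,
		       MEMBARRIER_CMD_FLAG_CPU, 0 /* target cpu, illustrative */))
		perror("private expedited, cpu 0");

	return 0;
}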
436 static int sync_runqueues_membarrier_state(struct mm_struct *mm) in sync_runqueues_membarrier_state() argument
438 int membarrier_state = atomic_read(&mm->membarrier_state); in sync_runqueues_membarrier_state()
442 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { in sync_runqueues_membarrier_state()
446 * For single mm user, we can simply issue a memory barrier in sync_runqueues_membarrier_state()
448 * mm and in the current runqueue to guarantee that no memory in sync_runqueues_membarrier_state()
453 return 0; in sync_runqueues_membarrier_state()
457 return -ENOMEM; in sync_runqueues_membarrier_state()
460 * For mm with multiple users, we need to ensure all future in sync_runqueues_membarrier_state()
461 * scheduler executions will observe @mm's new membarrier in sync_runqueues_membarrier_state()
467 * For each cpu runqueue, if the task's mm match @mm, ensure that all in sync_runqueues_membarrier_state()
468 * @mm's membarrier state set bits are also set in the runqueue's in sync_runqueues_membarrier_state()
470 * between threads which are users of @mm has its membarrier state in sync_runqueues_membarrier_state()
480 p = rcu_dereference(rq->curr); in sync_runqueues_membarrier_state()
481 if (p && p->mm == mm) in sync_runqueues_membarrier_state()
486 on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true); in sync_runqueues_membarrier_state()
491 return 0; in sync_runqueues_membarrier_state()
497 struct mm_struct *mm = p->mm; in membarrier_register_global_expedited() local
500 if (atomic_read(&mm->membarrier_state) & in membarrier_register_global_expedited()
502 return 0; in membarrier_register_global_expedited()
503 atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state); in membarrier_register_global_expedited()
504 ret = sync_runqueues_membarrier_state(mm); in membarrier_register_global_expedited()
508 &mm->membarrier_state); in membarrier_register_global_expedited()
510 return 0; in membarrier_register_global_expedited()
516 struct mm_struct *mm = p->mm; in membarrier_register_private_expedited() local
523 return -EINVAL; in membarrier_register_private_expedited()
528 return -EINVAL; in membarrier_register_private_expedited()
537 * groups, which use the same mm. (CLONE_VM but not in membarrier_register_private_expedited()
540 if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state) in membarrier_register_private_expedited()
541 return 0; in membarrier_register_private_expedited()
546 atomic_or(set_state, &mm->membarrier_state); in membarrier_register_private_expedited()
547 ret = sync_runqueues_membarrier_state(mm); in membarrier_register_private_expedited()
550 atomic_or(ready_state, &mm->membarrier_state); in membarrier_register_private_expedited()
552 return 0; in membarrier_register_private_expedited()
558 struct mm_struct *mm = p->mm; in membarrier_get_registrations() local
559 int registrations_mask = 0, membarrier_state, i; in membarrier_get_registrations()
578 membarrier_state = atomic_read(&mm->membarrier_state); in membarrier_get_registrations()
579 for (i = 0; i < ARRAY_SIZE(states); ++i) { in membarrier_get_registrations()
585 WARN_ON_ONCE(membarrier_state != 0); in membarrier_get_registrations()
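membarrier_get_registrations() above services MEMBARRIER_CMD_GET_REGISTRATIONS, which reports the registrations a process has already performed as a bitmask of MEMBARRIER_CMD_REGISTER_* values. A small hedged sketch of querying it, assuming a uapi header recent enough to define the command; the printed labels are illustrative:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

int main(void)
{
	long regs = syscall(__NR_membarrier,
			    MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);

	if (regs < 0) {
		perror("MEMBARRIER_CMD_GET_REGISTRATIONS");
		return 1;
	}
	/* The result is a bitmask of MEMBARRIER_CMD_REGISTER_* values. */
	if (regs & MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED)
		puts("registered: global expedited");
	if (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
		puts("registered: private expedited");
	if (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
		puts("registered: private expedited sync core");
	if (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
		puts("registered: private expedited rseq");
	return 0;
}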
590 * sys_membarrier - issue memory barriers on a set of threads
592 * @flags: Currently needs to be 0 for all commands other than
601 * If this system call is not implemented, -ENOSYS is returned. If the
604 * returns -EINVAL. For a given command, with flags argument set to 0,
605 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
607 * -ENOMEM if there is not enough memory available to perform the system
630 return -EINVAL; in SYSCALL_DEFINE3()
634 return -EINVAL; in SYSCALL_DEFINE3()
638 cpu_id = -1; in SYSCALL_DEFINE3()
652 return -EINVAL; in SYSCALL_DEFINE3()
655 return 0; in SYSCALL_DEFINE3()
661 return membarrier_private_expedited(0, cpu_id); in SYSCALL_DEFINE3()
663 return membarrier_register_private_expedited(0); in SYSCALL_DEFINE3()
675 return -EINVAL; in SYSCALL_DEFINE3()
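Taken together, the documentation block and the command dispatch above define the userspace contract: probe with MEMBARRIER_CMD_QUERY, expect -ENOSYS or -EINVAL where support is missing, and register before using expedited commands. A hedged sketch of that probe-before-use flow; the fallback choice is illustrative:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

int main(void)
{
	long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);

	if (cmds < 0) {
		/* Typically ENOSYS: membarrier not implemented here. */
		perror("MEMBARRIER_CMD_QUERY");
		return 1;
	}

	if (cmds & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
		if (syscall(__NR_membarrier,
			    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) ||
		    syscall(__NR_membarrier,
			    MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
			perror("private expedited");
	} else if (cmds & MEMBARRIER_CMD_GLOBAL) {
		/* Slower fallback; needs no registration. */
		if (syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0))
			perror("global");
	}
	return 0;
}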