Lines Matching +full:p +full:- +full:states

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
22 * CPU1 after the IPI-induced memory barrier:
29 * b: send IPI IPI-induced mb
46 * before the IPI-induced memory barrier on CPU1.
68 * b: send IPI IPI-induced mb
80 * after the IPI-induced memory barrier on CPU1.
82 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
89 * b: read rq->curr->mm == NULL
110 * e: current->mm = NULL
111 * b: read rq->curr->mm == NULL
124 * e: current->mm = NULL
125 * b: read rq->curr->mm == NULL
127 * f: current->mm = mm
178 * become visible to membarrier()'s caller -- see scenario B in in ipi_sync_core()
207 if (current->mm != mm) in ipi_sync_rq_state()
210 atomic_read(&mm->membarrier_state)); in ipi_sync_rq_state()
228 atomic_set(&mm->membarrier_state, 0); in membarrier_exec_mmap()
242 membarrier_state = atomic_read(&next_mm->membarrier_state); in membarrier_update_current_mm()
243 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_update_current_mm()
245 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_update_current_mm()
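Only three lines of membarrier_update_current_mm() match the query, so here is a context sketch of the whole helper, assuming it follows the mainline version; it is kernel-internal and only builds in-tree, since this_rq() and struct rq come from kernel/sched/sched.h. On each switch to a new mm the scheduler mirrors that mm's membarrier_state into the runqueue, skipping the store when nothing changed:

/* Context sketch of the mainline helper; kernel-internal, shown for reading aid only. */
void membarrier_update_current_mm(struct mm_struct *next_mm)
{
	struct rq *rq = this_rq();
	int membarrier_state = 0;

	if (next_mm)
		membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;
	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}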
257 * Matches memory barriers after rq->curr modification in in membarrier_global_expedited()
263 return -ENOMEM; in membarrier_global_expedited()
269 struct task_struct *p; in membarrier_global_expedited() local
282 if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) & in membarrier_global_expedited()
290 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
291 if (!p->mm) in membarrier_global_expedited()
308 * rq->curr modification in scheduler. in membarrier_global_expedited()
317 struct mm_struct *mm = current->mm; in membarrier_private_expedited()
322 return -EINVAL; in membarrier_private_expedited()
323 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
325 return -EPERM; in membarrier_private_expedited()
330 return -EINVAL; in membarrier_private_expedited()
331 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
333 return -EPERM; in membarrier_private_expedited()
337 if (!(atomic_read(&mm->membarrier_state) & in membarrier_private_expedited()
339 return -EPERM; in membarrier_private_expedited()
343 (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)) in membarrier_private_expedited()
347 * Matches memory barriers after rq->curr modification in in membarrier_private_expedited()
350 * On RISC-V, this barrier pairing is also needed for the in membarrier_private_expedited()
357 return -ENOMEM; in membarrier_private_expedited()
363 struct task_struct *p; in membarrier_private_expedited() local
368 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
369 if (!p || p->mm != mm) { in membarrier_private_expedited()
379 struct task_struct *p; in membarrier_private_expedited() local
381 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
382 if (p && p->mm == mm) in membarrier_private_expedited()
397 * skipping the current cpu -- we're about to do smp_mb() in membarrier_private_expedited()
402 * For SYNC_CORE, we do need a barrier on the current cpu -- in membarrier_private_expedited()
429 * rq->curr modification in scheduler. in membarrier_private_expedited()
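The -EPERM checks in membarrier_private_expedited() above mean a process must register before it may issue a private expedited barrier. A minimal userspace sketch of that requirement (glibc provides no wrapper, so syscall(2) is used directly; the third cpu_id argument is only consulted with MEMBARRIER_CMD_FLAG_CPU and is passed as 0 here):

#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Unregistered: the membarrier_state checks above make this fail with EPERM. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) < 0)
		perror("PRIVATE_EXPEDITED before registration");

	/* Register once per process; this sets the mm->membarrier_state bits. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) < 0)
		perror("REGISTER_PRIVATE_EXPEDITED");

	/* Now the expedited barrier is allowed. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) < 0)
		perror("PRIVATE_EXPEDITED after registration");
	return 0;
}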
438 int membarrier_state = atomic_read(&mm->membarrier_state); in sync_runqueues_membarrier_state()
442 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { in sync_runqueues_membarrier_state()
457 return -ENOMEM; in sync_runqueues_membarrier_state()
478 struct task_struct *p; in sync_runqueues_membarrier_state() local
480 p = rcu_dereference(rq->curr); in sync_runqueues_membarrier_state()
481 if (p && p->mm == mm) in sync_runqueues_membarrier_state()
496 struct task_struct *p = current; in membarrier_register_global_expedited() local
497 struct mm_struct *mm = p->mm; in membarrier_register_global_expedited()
500 if (atomic_read(&mm->membarrier_state) & in membarrier_register_global_expedited()
503 atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state); in membarrier_register_global_expedited()
508 &mm->membarrier_state); in membarrier_register_global_expedited()
515 struct task_struct *p = current; in membarrier_register_private_expedited() local
516 struct mm_struct *mm = p->mm; in membarrier_register_private_expedited()
523 return -EINVAL; in membarrier_register_private_expedited()
528 return -EINVAL; in membarrier_register_private_expedited()
540 if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state) in membarrier_register_private_expedited()
546 atomic_or(set_state, &mm->membarrier_state); in membarrier_register_private_expedited()
550 atomic_or(ready_state, &mm->membarrier_state); in membarrier_register_private_expedited()
557 struct task_struct *p = current; in membarrier_get_registrations() local
558 struct mm_struct *mm = p->mm; in membarrier_get_registrations()
560 static const int states[] = { in membarrier_get_registrations() local
576 BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds)); in membarrier_get_registrations()
578 membarrier_state = atomic_read(&mm->membarrier_state); in membarrier_get_registrations()
579 for (i = 0; i < ARRAY_SIZE(states); ++i) { in membarrier_get_registrations()
580 if (membarrier_state & states[i]) { in membarrier_get_registrations()
582 membarrier_state &= ~states[i]; in membarrier_get_registrations()
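membarrier_get_registrations() above translates the mm's membarrier_state bits back into MEMBARRIER_CMD_REGISTER_* flags. A hedged userspace sketch of querying them; MEMBARRIER_CMD_GET_REGISTRATIONS requires a kernel and uapi linux/membarrier.h recent enough to define it:

#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long regs;

	/* Register private expedited so the query below has something to report. */
	syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);

	/* Returns a bitmask of the REGISTER_* commands already completed. */
	regs = syscall(__NR_membarrier, MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
	if (regs < 0) {
		perror("MEMBARRIER_CMD_GET_REGISTRATIONS");
		return 1;
	}
	if (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
		printf("private expedited is registered\n");
	if (regs & MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED)
		printf("global expedited is registered\n");
	return 0;
}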
590 * sys_membarrier - issue memory barriers on a set of threads
601 * If this system call is not implemented, -ENOSYS is returned. If the
604 * returns -EINVAL. For a given command, with flags argument set to 0,
605 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
607 * -ENOMEM if there is not enough memory available to perform the system
630 return -EINVAL; in SYSCALL_DEFINE3()
634 return -EINVAL; in SYSCALL_DEFINE3()
638 cpu_id = -1; in SYSCALL_DEFINE3()
652 return -EINVAL; in SYSCALL_DEFINE3()
675 return -EINVAL; in SYSCALL_DEFINE3()
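The QUERY semantics quoted from the sys_membarrier comment above suggest the usual probe pattern: check for -ENOSYS, then test the returned command bitmask before relying on a command. A minimal sketch of that pattern:

#include <errno.h>
#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);

	if (cmds < 0) {
		if (errno == ENOSYS)
			fprintf(stderr, "membarrier() not implemented\n");
		else
			perror("MEMBARRIER_CMD_QUERY");
		return 1;
	}
	/* QUERY returns a bitmask of the commands this kernel supports. */
	if (cmds & MEMBARRIER_CMD_GLOBAL)
		syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0);
	else
		fprintf(stderr, "MEMBARRIER_CMD_GLOBAL unsupported\n");
	return 0;
}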