Lines Matching +full:no +full:- +full:idle +full:- +full:on +full:- +full:init
79 check_cpu_switched(int c, cpuset_t *csp, uint64_t *swt, bool init)
88 if (pc->pc_curthread == pc->pc_idlethread) {
95 * pc_curthread with non-idle thread pointer is visible before
100 sw = pc->pc_switchtime;
101 if (init)
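The check_cpu_switched() fragments above implement a simple test: a CPU needs no further attention once it is running its idle thread, and otherwise it is considered done only after its per-CPU switch timestamp changes from the snapshot taken on the first pass. A minimal userspace model of that logic, with a hypothetical struct pcpu_model standing in for the kernel's struct pcpu, might look like this (a sketch, not the kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified stand-in for the kernel's per-CPU data. */
struct pcpu_model {
	void		*pc_curthread;	/* thread currently running on the CPU */
	void		*pc_idlethread;	/* that CPU's idle thread */
	uint64_t	pc_switchtime;	/* timestamp of the last context switch */
};

/*
 * On the init pass, snapshot the switch time of a CPU that is running a
 * non-idle thread.  On later passes, the CPU is considered to have gone
 * through a context switch once it is idle or its switch time changed.
 * (The kernel code separates the pc_curthread and pc_switchtime reads
 * with a fence, per the comment above; that detail is omitted here.)
 */
static bool
cpu_switched_model(const struct pcpu_model *pc, uint64_t *swt, bool init)
{
	if (pc->pc_curthread == pc->pc_idlethread)
		return (true);
	if (init) {
		*swt = pc->pc_switchtime;
		return (false);
	}
	return (pc->pc_switchtime != *swt);
}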
110 * sync_core) on current CPU as well. There is no guarantee that
113 * might not be provided by the syscall return. E.g. on amd64 we
130 td->td_retval[0] = MEMBARRIER_SUPPORTED_CMDS;
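Line 130 is the MEMBARRIER_CMD_QUERY path: the return value is a bitmask of the commands this kernel supports. A minimal capability probe, assuming a membarrier() wrapper with the (cmd, flags, cpu_id) signature seen at line 238 below and the MEMBARRIER_CMD_* constants from <sys/membarrier.h>, could look like:

#include <sys/membarrier.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int mask;

	/* CMD_QUERY returns the MEMBARRIER_SUPPORTED_CMDS bitmask. */
	mask = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
	if (mask == -1)
		err(1, "membarrier(MEMBARRIER_CMD_QUERY)");
	if ((mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) == 0)
		errx(1, "private expedited barriers not supported");
	printf("supported membarrier commands: %#x\n", mask);
	return (0);
}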
134 p = td->td_proc;
158 if ((td->td_proc->p_flag2 & P2_MEMBAR_GLOBE) == 0) {
163 td1 = cpuid_to_pcpu[c]->pc_curthread;
164 p1 = td1->td_proc;
166 (p1->p_flag2 & P2_MEMBAR_GLOBE) != 0)
174 if ((p->p_flag2 & P2_MEMBAR_GLOBE) == 0) {
176 p->p_flag2 |= P2_MEMBAR_GLOBE;
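Lines 158-176 are the global expedited pair: the register command sets P2_MEMBAR_GLOBE on the calling process, while the barrier command scans each CPU's current thread and only targets CPUs running a process with that flag set; the check at line 158 suggests the issuing process must itself be registered first. A hedged userspace sketch, again assuming the membarrier() wrapper and the <sys/membarrier.h> constants:

#include <sys/membarrier.h>
#include <err.h>

/*
 * Opt this process in to global expedited barriers, then issue one.
 * Every CPU currently running a thread of a registered process passes
 * through a full memory barrier before the second call returns.
 */
static void
global_barrier_example(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0, 0) != 0)
		err(1, "membarrier register global expedited");
	if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0, 0) != 0)
		err(1, "membarrier global expedited");
}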
182 if ((td->td_proc->p_flag2 & P2_MEMBAR_PRIVE) == 0) {
185 pmap_active_cpus(vmspace_pmap(p->p_vmspace), &cs);
191 if ((p->p_flag2 & P2_MEMBAR_PRIVE) == 0) {
193 p->p_flag2 |= P2_MEMBAR_PRIVE;
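The private expedited commands (lines 182-193) are scoped to the calling process: pmap_active_cpus() on the process's own pmap selects only the CPUs where this address space is currently active. A common use is asymmetric synchronization, where hot-path threads avoid heavyweight fences and a rare slow path forces a barrier on all of the process's running threads. A sketch under the same wrapper/header assumptions:

#include <sys/membarrier.h>
#include <err.h>

/* Register once, before the barrier is first needed (sets P2_MEMBAR_PRIVE). */
static void
membar_private_setup(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) != 0)
		err(1, "membarrier register private expedited");
}

/*
 * Slow-path barrier: every currently running thread of this process has
 * passed through a full memory barrier by the time this returns.
 */
static void
membar_private_all_threads(void)
{
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) != 0)
		err(1, "membarrier private expedited");
}

As the check at line 182 indicates, the command path first verifies the registration flag, so membar_private_setup() has to run before the first membar_private_all_threads() call.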
199 if ((td->td_proc->p_flag2 & P2_MEMBAR_PRIVE_SYNCORE) == 0) {
205 * cpu_sync_core() on CPUs that were missed
208 * on amd64 because threads always use slow
212 pmap_active_cpus(vmspace_pmap(p->p_vmspace), &cs);
220 if ((p->p_flag2 & P2_MEMBAR_PRIVE_SYNCORE) == 0) {
222 p->p_flag2 |= P2_MEMBAR_PRIVE_SYNCORE;
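The sync-core variant (lines 199-222) additionally guarantees that the targeted CPUs execute a core-serializing instruction, which is what self-modifying code needs: after new instructions are written, every thread must serialize before it may run them. The comment fragments at lines 205-212 discuss which CPUs can skip the explicit cpu_sync_core() IPI on amd64. A hedged JIT-style sketch (real JITs also need per-architecture instruction-cache maintenance, omitted here):

#include <sys/membarrier.h>
#include <err.h>
#include <string.h>

/* Register once (sets P2_MEMBAR_PRIVE_SYNCORE). */
static void
jit_membar_setup(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
	    0, 0) != 0)
		err(1, "membarrier register private expedited sync core");
}

/*
 * Copy freshly generated instructions into an executable buffer, then make
 * sure every running thread of this process has executed a core-serializing
 * instruction before any of them is allowed to jump to the new code.
 */
static void
jit_publish(void *code, const void *insns, size_t len)
{
	memcpy(code, insns, len);
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0) != 0)
		err(1, "membarrier private expedited sync core");
}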
238 return (kern_membarrier(td, uap->cmd, uap->flags, uap->cpu_id));
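Line 238 is the thin syscall entry point: it forwards cmd, flags, and cpu_id to kern_membarrier(). Where no libc prototype is available, the same three-argument call can be issued through syscall(2), assuming SYS_membarrier is present in <sys/syscall.h> (a hypothetical fallback wrapper, not part of the source above):

#include <sys/syscall.h>
#include <unistd.h>

/* Behaves like membarrier(cmd, flags, cpu_id); returns -1 with errno set on error. */
static int
membarrier_raw(int cmd, unsigned flags, int cpu_id)
{
	return ((int)syscall(SYS_membarrier, cmd, flags, cpu_id));
}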