Lines Matching refs:mm_cid

10760 struct mm_mm_cid *mc = &mm->mm_cid; in mm_update_max_cids()
10763 lockdep_assert_held(&mm->mm_cid.lock); in mm_update_max_cids()
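
The hits above and below all dereference a per-mm control block at mm->mm_cid, a per-task counterpart at task->mm_cid, and a per-CPU slot. A minimal reconstruction of those layouts, inferred purely from the fields this listing touches; the real names, types, and field order may differ:

        #include <linux/spinlock.h>
        #include <linux/mutex.h>
        #include <linux/irq_work.h>
        #include <linux/workqueue.h>
        #include <linux/percpu.h>
        #include <linux/list.h>

        /* Per-CPU cid slot; only the 'cid' member is visible in this listing. */
        struct mm_cid_pcpu {
                unsigned int cid;
        };

        /* Per-mm state, reconstructed from the fields referenced here. */
        struct mm_mm_cid {
                raw_spinlock_t  lock;              /* protects cid state (lockdep asserts) */
                struct mutex    mutex;             /* serializes slow-path fixups */
                struct mm_cid_pcpu __percpu *pcpu; /* per-CPU cid slots */
                unsigned int    users;             /* tasks on user_list */
                unsigned int    mode;              /* per-CPU vs. per-task ownership */
                unsigned int    max_cids;
                unsigned int    nr_cpus_allowed;
                unsigned int    pcpu_thrs;
                unsigned int    update_deferred;
                struct irq_work irq_work;          /* hard-irq kick for deferred updates */
                struct work_struct work;           /* deferred update in process context */
                struct hlist_head user_list;       /* all tasks using a cid in this mm */
        };

        /* Per-task side; the struct's real name is not visible in this listing. */
        struct task_mm_cid_sketch {
                unsigned int      cid;             /* tagged cid, MM_CID_UNSET when none */
                unsigned int      active;          /* task counted in mm_cid.users */
                struct hlist_node node;            /* linked on mm_cid.user_list */
        };
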
10801 if (!mm || !READ_ONCE(mm->mm_cid.users)) in mm_update_cpus_allowed()
10807 mc = &mm->mm_cid; in mm_update_cpus_allowed()
10841 WRITE_ONCE(mm->mm_cid.mode, mode); in mm_cid_complete_transit()
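
mm_update_cpus_allowed() bails out on a plain READ_ONCE() of mm_cid.users, and mm_cid_complete_transit() publishes the mode with WRITE_ONCE(); the pairing lets lockless readers observe the switch safely. A sketch of the publish side, assuming the function takes the new mode as an argument:

        /* Sketch only: the signature and any work before the store are assumptions. */
        static void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode)
        {
                /* Paired with READ_ONCE() in lockless readers (e.g. line 10801). */
                WRITE_ONCE(mm->mm_cid.mode, mode);
        }
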
10846 if (cid_on_cpu(t->mm_cid.cid)) { in mm_cid_transit_to_task()
10847 unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid); in mm_cid_transit_to_task()
10849 t->mm_cid.cid = cid_to_transit_cid(cid); in mm_cid_transit_to_task()
10850 pcp->cid = t->mm_cid.cid; in mm_cid_transit_to_task()
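
mm_cid_transit_to_task() above and mm_cid_transit_to_cpu() below (lines 10892-10894) move a cid between CPU and task ownership through an intermediate "transit" encoding: cid_on_cpu()/cid_on_task() test the tag, cpu_cid_to_cid() strips it, and cid_to_transit_cid()/cid_from_transit_cid() convert. The listing does not reveal the bit layout, so the constants below are invented purely to illustrate the usual high-bit tagging scheme:

        /* Invented bit layout, for illustration only. */
        #define MM_CID_UNSET    (~0U)
        #define MM_CID_ON_CPU   0x80000000U     /* cid currently owned by a CPU slot */
        #define MM_CID_TRANSIT  0x40000000U     /* cid moving between owners */
        #define MM_CID_MASK     0x0fffffffU     /* the cid value proper */

        static inline bool cid_on_cpu(unsigned int cid)
        {
                return cid & MM_CID_ON_CPU;
        }

        static inline bool cid_on_task(unsigned int cid)
        {
                return cid != MM_CID_UNSET && !(cid & MM_CID_ON_CPU);
        }

        static inline unsigned int cpu_cid_to_cid(unsigned int cid)
        {
                return cid & MM_CID_MASK;
        }

        static inline unsigned int cid_to_transit_cid(unsigned int cid)
        {
                return cid | MM_CID_TRANSIT;
        }

        static inline unsigned int cid_from_transit_cid(unsigned int cid)
        {
                return cid & ~MM_CID_TRANSIT;
        }
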
10860 struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu); in mm_cid_fixup_cpus_to_tasks()
10871 if (rq->curr->mm == mm && rq->curr->mm_cid.active) in mm_cid_fixup_cpus_to_tasks()
10876 } else if (rq->curr->mm == mm && rq->curr->mm_cid.active) { in mm_cid_fixup_cpus_to_tasks()
10877 unsigned int cid = rq->curr->mm_cid.cid; in mm_cid_fixup_cpus_to_tasks()
10882 rq->curr->mm_cid.cid = cid; in mm_cid_fixup_cpus_to_tasks()
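
Lines 10860-10882 imply a walk over every CPU's runqueue that hands CPU-held cids back to the tasks currently running them. The branch bodies between the listed lines are not visible, so this skeleton only shows the recurring test; runqueue locking is elided and struct rq / cpu_rq() come from kernel/sched/sched.h:

        static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
        {
                int cpu;

                for_each_possible_cpu(cpu) {
                        struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
                        struct rq *rq = cpu_rq(cpu);

                        if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
                                unsigned int cid = rq->curr->mm_cid.cid;

                                /* Lines 10878-10881 are not in the listing; the
                                 * conversion applied to 'cid' here is a guess. */
                                cid = cid_from_transit_cid(cid);
                                rq->curr->mm_cid.cid = cid;
                        }
                        /* Guessed: the slot itself is vacated after the handoff. */
                        pcp->cid = MM_CID_UNSET;
                }
        }
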
10892 if (cid_on_task(t->mm_cid.cid)) { in mm_cid_transit_to_cpu()
10893 t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid); in mm_cid_transit_to_cpu()
10894 pcp->cid = t->mm_cid.cid; in mm_cid_transit_to_cpu()
10902 if (cid_on_task(t->mm_cid.cid)) { in mm_cid_fixup_task_to_cpu()
10905 mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t))); in mm_cid_fixup_task_to_cpu()
10916 lockdep_assert_held(&mm->mm_cid.mutex); in mm_cid_fixup_tasks_to_cpus()
10918 hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) { in mm_cid_fixup_tasks_to_cpus()
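
mm_cid_fixup_task_to_cpu() (lines 10902-10905) pushes a task-owned cid into the task's current CPU slot, and mm_cid_fixup_tasks_to_cpus() applies that to every user on mm_cid.user_list while holding mm_cid.mutex. Assembled from the listed lines, with assumed signatures:

        static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
        {
                if (cid_on_task(t->mm_cid.cid))
                        mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
        }

        static void mm_cid_fixup_tasks_to_cpus(struct mm_struct *mm)
        {
                struct task_struct *t;

                lockdep_assert_held(&mm->mm_cid.mutex);

                hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node)
                        mm_cid_fixup_task_to_cpu(t, mm);
        }
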
10929 lockdep_assert_held(&mm->mm_cid.lock); in sched_mm_cid_add_user()
10931 t->mm_cid.active = 1; in sched_mm_cid_add_user()
10932 hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list); in sched_mm_cid_add_user()
10933 mm->mm_cid.users++; in sched_mm_cid_add_user()
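
Registration is a three-step update done under mm_cid.lock: mark the task active, queue it on user_list, bump the user count. Reconstructed nearly verbatim from lines 10929-10933 (signature assumed):

        static void sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
        {
                lockdep_assert_held(&mm->mm_cid.lock);

                t->mm_cid.active = 1;
                hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list);
                mm->mm_cid.users++;
        }
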
10945 WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET); in sched_mm_cid_fork()
10947 guard(mutex)(&mm->mm_cid.mutex); in sched_mm_cid_fork()
10948 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { in sched_mm_cid_fork()
10949 struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu); in sched_mm_cid_fork()
10952 if (!mm->mm_cid.users) { in sched_mm_cid_fork()
10954 t->mm_cid.cid = mm_get_cid(mm); in sched_mm_cid_fork()
10956 pcp->cid = t->mm_cid.cid; in sched_mm_cid_fork()
10961 if (!cid_on_cpu(mm->mm_cid.mode)) in sched_mm_cid_fork()
10962 t->mm_cid.cid = mm_get_cid(mm); in sched_mm_cid_fork()
10967 percpu = cid_on_cpu(mm->mm_cid.mode); in sched_mm_cid_fork()
10978 t->mm_cid.cid = mm_get_cid(mm); in sched_mm_cid_fork()
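
The fork path shows the lock nesting used throughout: mm_cid.mutex first, then the raw spinlock with interrupts off, both via cleanup.h guards so every exit path unlocks. A condensed sketch of the in-lock portion; the placement of the three mm_get_cid() sites (lines 10954, 10962, 10978) relative to the branches is partly guessed:

        void sched_mm_cid_fork(struct task_struct *t)
        {
                struct mm_struct *mm = t->mm;

                WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET);

                guard(mutex)(&mm->mm_cid.mutex);
                scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
                        struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);

                        if (!mm->mm_cid.users) {
                                /* First user: take a cid and seed this CPU's slot. */
                                t->mm_cid.cid = mm_get_cid(mm);
                                pcp->cid = t->mm_cid.cid;
                        } else if (!cid_on_cpu(mm->mm_cid.mode)) {
                                /* Per-task mode: every user owns its own cid. */
                                t->mm_cid.cid = mm_get_cid(mm);
                        }
                        sched_mm_cid_add_user(t, mm);   /* placement assumed */
                }
        }

Taking the mutex outside keeps raw-spinlock hold times short and lets spinlock-only helpers such as sched_mm_cid_add_user() assert just the inner lock.
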
10984 lockdep_assert_held(&t->mm->mm_cid.lock); in sched_mm_cid_remove_user()
10986 t->mm_cid.active = 0; in sched_mm_cid_remove_user()
10988 t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); in sched_mm_cid_remove_user()
10990 hlist_del_init(&t->mm_cid.node); in sched_mm_cid_remove_user()
10991 t->mm->mm_cid.users--; in sched_mm_cid_remove_user()
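
Deregistration mirrors sched_mm_cid_add_user(), again under mm_cid.lock. Reconstructed from lines 10984-10991; the signature is assumed, and the transit conversion at line 10988 may be conditional in the original:

        static void sched_mm_cid_remove_user(struct task_struct *t)
        {
                lockdep_assert_held(&t->mm->mm_cid.lock);

                t->mm_cid.active = 0;
                /* A cid parked in transit is converted back before the task leaves. */
                t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
                hlist_del_init(&t->mm_cid.node);
                t->mm->mm_cid.users--;
        }
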
11007 if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode))) in __sched_mm_cid_exit()
11027 if (!mm || !t->mm_cid.active) in sched_mm_cid_exit()
11034 scoped_guard(mutex, &mm->mm_cid.mutex) { in sched_mm_cid_exit()
11036 if (likely(mm->mm_cid.users > 1)) { in sched_mm_cid_exit()
11037 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { in sched_mm_cid_exit()
11046 mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu)); in sched_mm_cid_exit()
11052 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { in sched_mm_cid_exit()
11055 mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu)); in sched_mm_cid_exit()
11069 irq_work_sync(&mm->mm_cid.irq_work); in sched_mm_cid_exit()
11070 cancel_work_sync(&mm->mm_cid.work); in sched_mm_cid_exit()
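
The exit path distinguishes the common case (other users remain) from the last user, then makes sure no deferred update outlives the mm's users. How lines 11046 and 11052-11055 distribute over the two branches is not visible here, so the branch bodies below are guesses arranged around the listed calls:

        void sched_mm_cid_exit(struct task_struct *t)
        {
                struct mm_struct *mm = t->mm;

                if (!mm || !t->mm_cid.active)
                        return;

                scoped_guard(mutex, &mm->mm_cid.mutex) {
                        if (likely(mm->mm_cid.users > 1)) {
                                scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock)
                                        sched_mm_cid_remove_user(t);
                                mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));
                        } else {
                                /* Last user: pull the per-CPU cid back to the task. */
                                scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
                                        sched_mm_cid_remove_user(t);
                                        mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
                                }
                        }
                }
                /* Flush any deferred update still in flight for this mm. */
                irq_work_sync(&mm->mm_cid.irq_work);
                cancel_work_sync(&mm->mm_cid.work);
        }
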
11088 struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work); in mm_cid_work_fn()
11090 guard(mutex)(&mm->mm_cid.mutex); in mm_cid_work_fn()
11092 if (!mm->mm_cid.users) in mm_cid_work_fn()
11095 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { in mm_cid_work_fn()
11097 if (!mm->mm_cid.update_deferred) in mm_cid_work_fn()
11103 if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode))) in mm_cid_work_fn()
11111 struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work); in mm_cid_irq_work()
11118 schedule_work(&mm->mm_cid.work); in mm_cid_irq_work()
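
Lines 11088-11118 are the classic two-stage deferral: a hard irq_work handler that merely schedules a regular work item, and a work function that revalidates users and update_deferred under the proper locks before acting. A sketch of that bounce; everything beyond the listed lines is an assumption:

        static void mm_cid_work_fn(struct work_struct *work)
        {
                struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);

                guard(mutex)(&mm->mm_cid.mutex);
                /* The mm may have lost its last user before the work ran. */
                if (!mm->mm_cid.users)
                        return;

                scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
                        if (!mm->mm_cid.update_deferred)
                                return;
                        mm->mm_cid.update_deferred = 0;         /* assumed */
                }
                /* The deferred update itself (lines 11099+) is not in the listing. */
        }

        static void mm_cid_irq_work(struct irq_work *work)
        {
                struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);

                /* Hard-irq context: punt the real work to process context. */
                schedule_work(&mm->mm_cid.work);
        }
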
11123 mm->mm_cid.max_cids = 0; in mm_init_cid()
11124 mm->mm_cid.mode = 0; in mm_init_cid()
11125 mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed; in mm_init_cid()
11126 mm->mm_cid.users = 0; in mm_init_cid()
11127 mm->mm_cid.pcpu_thrs = 0; in mm_init_cid()
11128 mm->mm_cid.update_deferred = 0; in mm_init_cid()
11129 raw_spin_lock_init(&mm->mm_cid.lock); in mm_init_cid()
11130 mutex_init(&mm->mm_cid.mutex); in mm_init_cid()
11131 mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work); in mm_init_cid()
11132 INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn); in mm_init_cid()
11133 INIT_HLIST_HEAD(&mm->mm_cid.user_list); in mm_init_cid()
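
The initialization block above is nearly complete in this listing; stitched together, mm_init_cid() reads roughly as below. The signature is inferred from the use of p->nr_cpus_allowed at line 11125, and any allocation of the pcpu slots outside the listed lines is omitted:

        void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
        {
                mm->mm_cid.max_cids = 0;
                mm->mm_cid.mode = 0;
                mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
                mm->mm_cid.users = 0;
                mm->mm_cid.pcpu_thrs = 0;
                mm->mm_cid.update_deferred = 0;
                raw_spin_lock_init(&mm->mm_cid.lock);
                mutex_init(&mm->mm_cid.mutex);
                mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
                INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
                INIT_HLIST_HEAD(&mm->mm_cid.user_list);
        }
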