Lines matching defs:subscriptions — cross-reference hits for the subscriptions identifier in the Linux kernel's mmu_notifier implementation (mm/mmu_notifier.c). Each hit is prefixed with its source line number.

70  * - subscriptions->invalidate_seq & 1 == True (odd)
76  * - subscriptions->invalidate_seq & 1 == False (even)
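The two matched comment lines (source lines 70 and 76) name the seqcount-like convention at the heart of this file: invalidate_seq is odd while at least one range invalidation holds the "write side", and even when idle. A minimal sketch of the convention as these fragments suggest it (the surrounding comment text is not part of the match, so this is a reconstruction):

        subscriptions->invalidate_seq |= 1;        /* enter the write side: counter goes odd */
        subscriptions->invalidate_seq++;           /* leave the write side: counter goes even */
        busy = subscriptions->invalidate_seq & 1;  /* nonzero while a writer exists */

Readers retry or sleep when they observe an odd value, much like a seqlock; unlike a seqlock, several writers may hold the odd state at once, which is why the structure also carries an active_invalidate_ranges count.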
89 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
91 lockdep_assert_held(&subscriptions->lock);
92 return subscriptions->invalidate_seq & 1;
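Source lines 89-92 show essentially all of mn_itree_is_invalidating(); only the return type and braces are missing from the match. Reassembled (the bool return type is an assumption consistent with the `& 1` result):

static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
        /* the counter may only be inspected under the spinlock */
        lockdep_assert_held(&subscriptions->lock);
        return subscriptions->invalidate_seq & 1;
}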
96 mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
103 spin_lock(&subscriptions->lock);
104 subscriptions->active_invalidate_ranges++;
105 node = interval_tree_iter_first(&subscriptions->itree, range->start,
108 subscriptions->invalidate_seq |= 1;
113 *seq = subscriptions->invalidate_seq;
114 spin_unlock(&subscriptions->lock);
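Source lines 96-114 outline the write-side entry point, mn_itree_inv_start_range(): take the lock, count the active range, probe the interval tree for overlapping subscriptions, and force the counter odd only if there is at least one hit. A sketch of that shape; the full signature, the node test, and the container_of() step are assumptions filled in around the matched lines:

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
                         const struct mmu_notifier_range *range,
                         unsigned long *seq)
{
        struct interval_tree_node *node;
        struct mmu_interval_notifier *res = NULL;

        spin_lock(&subscriptions->lock);
        subscriptions->active_invalidate_ranges++;
        node = interval_tree_iter_first(&subscriptions->itree, range->start,
                                        range->end - 1);
        if (node) {
                /* at least one subscription overlaps: go odd */
                subscriptions->invalidate_seq |= 1;
                res = container_of(node, struct mmu_interval_notifier,
                                   interval_tree);
        }

        *seq = subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);
        return res;
}

The `range->end - 1` bound reflects the interval tree's inclusive endpoints; that detail is not visible in the matched lines and is part of the reconstruction.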
131 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
136 spin_lock(&subscriptions->lock);
137 if (--subscriptions->active_invalidate_ranges ||
138 !mn_itree_is_invalidating(subscriptions)) {
139 spin_unlock(&subscriptions->lock);
144 subscriptions->invalidate_seq++;
153 &subscriptions->deferred_list,
157 &subscriptions->itree);
160 &subscriptions->itree);
163 spin_unlock(&subscriptions->lock);
165 wake_up_all(&subscriptions->wq);
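Lines 131-165 show mn_itree_inv_end(): the last invalidation out the door bumps invalidate_seq back to even, replays inserts and removes that were parked on deferred_list while the tree was frozen, then wakes sleepers. A sketch; the deferred-list traversal and the RB_EMPTY_NODE test distinguishing a deferred insert from a deferred remove are assumptions around the matched lines:

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
        struct mmu_interval_notifier *interval_sub;
        struct hlist_node *next;

        spin_lock(&subscriptions->lock);
        if (--subscriptions->active_invalidate_ranges ||
            !mn_itree_is_invalidating(subscriptions)) {
                spin_unlock(&subscriptions->lock);
                return;
        }

        /* make invalidate_seq even again: the write side is released */
        subscriptions->invalidate_seq++;

        /* replay tree updates deferred while the itree was frozen */
        hlist_for_each_entry_safe(interval_sub, next,
                                  &subscriptions->deferred_list,
                                  deferred_item) {
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                else
                        interval_tree_remove(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                hlist_del(&interval_sub->deferred_item);
        }
        spin_unlock(&subscriptions->lock);

        wake_up_all(&subscriptions->wq);
}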
190 struct mmu_notifier_subscriptions *subscriptions =
200 * subscriptions seq, then it is currently between
210 * seq = ++subscriptions->invalidate_seq
222 * seq = ++subscriptions->invalidate_seq
234 spin_lock(&subscriptions->lock);
237 is_invalidating = seq == subscriptions->invalidate_seq;
238 spin_unlock(&subscriptions->lock);
245 * subscriptions->invalidate_seq is even in the idle state.
250 wait_event(subscriptions->wq,
251 READ_ONCE(subscriptions->invalidate_seq) != seq);
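Lines 190-251 come from mmu_interval_read_begin() and its long comment on the collision-retry scheme (lines 200-222 quote `seq = ++subscriptions->invalidate_seq` from that comment, not from executable code). The function samples the subscription's own stamped sequence and sleeps only if it equals the currently-held write-side value. A sketch of the executable part; the READ_ONCE on the subscription's seq and the return are assumptions around the matched lines:

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
        struct mmu_notifier_subscriptions *subscriptions =
                interval_sub->mm->notifier_subscriptions;
        unsigned long seq;
        bool is_invalidating;

        spin_lock(&subscriptions->lock);
        seq = READ_ONCE(interval_sub->invalidate_seq);
        is_invalidating = seq == subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);

        /*
         * interval_sub->invalidate_seq is always stamped odd, and
         * subscriptions->invalidate_seq is even in the idle state, so even
         * across wraparound the sleep below must clear eventually.
         */
        if (is_invalidating)
                wait_event(subscriptions->wq,
                           READ_ONCE(subscriptions->invalidate_seq) != seq);

        return seq;
}

The caller later re-checks the returned seq after snapshotting PTEs, in the usual collision-retry pattern; a single pass here (rather than a loop) lets non-blocking callers avoid sleeping.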
263 static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
278 mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
286 mn_itree_inv_end(subscriptions);
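Lines 263-286 show mn_itree_release(): on mm teardown every interval subscription gets one final ->invalidate() call over the whole address space, bracketed by the same inv_start/inv_end pair as an ordinary invalidation. A sketch; the MMU_NOTIFY_RELEASE range covering 0..ULONG_MAX and the companion iterator (assumed here to be named mn_itree_inv_next(), not among the matched lines) are reconstructions drawn from the function's role:

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier_range range = {
                .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
                .event = MMU_NOTIFY_RELEASE,
                .mm = mm,
                .start = 0,
                .end = ULONG_MAX,
        };
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, &range))
                WARN_ON(!interval_sub->ops->invalidate(interval_sub, &range,
                                                       cur_seq));

        mn_itree_inv_end(subscriptions);
}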
301 static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
312 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
323 spin_lock(&subscriptions->lock);
324 while (unlikely(!hlist_empty(&subscriptions->list))) {
325 subscription = hlist_entry(subscriptions->list.first,
335 spin_unlock(&subscriptions->lock);
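Lines 301-335 show mn_hlist_release(), the classic-notifier teardown: walk the RCU list calling ->release(), then empty the list under the spinlock so no subscription is left hashed. A sketch; the SRCU bracketing (this file keeps a static SRCU domain, assumed here to be named srcu) and the ->release dispatch are reconstructions around the matched traversal and unlink lines:

static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier *subscription;
        int id;

        /* SRCU holds off unregister until ->release returns */
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu))
                if (subscription->ops->release)
                        subscription->ops->release(subscription, mm);

        spin_lock(&subscriptions->lock);
        while (unlikely(!hlist_empty(&subscriptions->list))) {
                subscription = hlist_entry(subscriptions->list.first,
                                           struct mmu_notifier, hlist);
                hlist_del_init_rcu(&subscription->hlist);
        }
        spin_unlock(&subscriptions->lock);
        srcu_read_unlock(&srcu, id);
}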
352 struct mmu_notifier_subscriptions *subscriptions =
355 if (subscriptions->has_itree)
356 mn_itree_release(subscriptions, mm);
358 if (!hlist_empty(&subscriptions->list))
359 mn_hlist_release(subscriptions, mm);
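Lines 352-359 are the release dispatcher: interval-tree users first, then the classic hlist users. Reassembled; the function name (__mmu_notifier_release) and the load from mm->notifier_subscriptions are assumptions consistent with the fragments:

void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;

        if (subscriptions->has_itree)
                mn_itree_release(subscriptions, mm);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_release(subscriptions, mm);
}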
429 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
436 mn_itree_inv_start_range(subscriptions, range, &cur_seq);
456 mn_itree_inv_end(subscriptions);
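Lines 429-456 show mn_itree_invalidate(), the interval-tree half of invalidate_range_start: iterate the overlapping subscriptions calling ->invalidate(); a refusal from a non-blockable context unwinds via mn_itree_inv_end() and returns -EAGAIN. A sketch; the loop body and error path are assumptions around the matched start/end calls (mn_itree_inv_next() again being an assumed iterator name):

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
                               const struct mmu_notifier_range *range)
{
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, range)) {
                if (!interval_sub->ops->invalidate(interval_sub, range,
                                                   cur_seq)) {
                        if (WARN_ON(mmu_notifier_range_blockable(range)))
                                continue;
                        /*
                         * A failed non-blockable start must not be paired
                         * with an invalidate_range_end(), so unwind here.
                         */
                        mn_itree_inv_end(subscriptions);
                        return -EAGAIN;
                }
        }
        return 0;
}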
461 struct mmu_notifier_subscriptions *subscriptions,
469 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
507 hlist_for_each_entry_rcu(subscription, &subscriptions->list,
523 struct mmu_notifier_subscriptions *subscriptions =
527 if (subscriptions->has_itree) {
528 ret = mn_itree_invalidate(subscriptions, range);
532 if (!hlist_empty(&subscriptions->list))
533 return mn_hlist_invalidate_range_start(subscriptions, range);
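Lines 461-533 cover the start side in two pieces: mn_hlist_invalidate_range_start() walks the RCU list calling each subscription's ->invalidate_range_start() (the second traversal at line 507 is its error path, delivering ends to notifiers whose start already succeeded), while the dispatcher tries the itree side first and bails out on failure. A sketch of the dispatcher only; its name and return handling are assumptions consistent with the fragments:

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
        int ret;

        if (subscriptions->has_itree) {
                ret = mn_itree_invalidate(subscriptions, range);
                if (ret)
                        return ret;
        }
        if (!hlist_empty(&subscriptions->list))
                return mn_hlist_invalidate_range_start(subscriptions, range);
        return 0;
}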
538 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
545 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
561 struct mmu_notifier_subscriptions *subscriptions =
565 if (subscriptions->has_itree)
566 mn_itree_inv_end(subscriptions);
568 if (!hlist_empty(&subscriptions->list))
569 mn_hlist_invalidate_end(subscriptions, range);
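Lines 538-569 are the mirror-image end side: mn_hlist_invalidate_end() walks the same RCU list calling ->invalidate_range_end(), and the dispatcher undoes both halves, releasing the itree write side before notifying the hlist users. A sketch of the dispatcher; naming follows the start-side pattern and is an assumption:

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;

        if (subscriptions->has_itree)
                mn_itree_inv_end(subscriptions);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_invalidate_end(subscriptions, range);
}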
599 struct mmu_notifier_subscriptions *subscriptions = NULL;
621 subscriptions = kzalloc(
623 if (!subscriptions)
626 INIT_HLIST_HEAD(&subscriptions->list);
627 spin_lock_init(&subscriptions->lock);
628 subscriptions->invalidate_seq = 2;
629 subscriptions->itree = RB_ROOT_CACHED;
630 init_waitqueue_head(&subscriptions->wq);
631 INIT_HLIST_HEAD(&subscriptions->deferred_list);
654 if (subscriptions)
655 smp_store_release(&mm->notifier_subscriptions, subscriptions);
675 kfree(subscriptions);
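Lines 599-675 show the lazy allocation inside registration: a zeroed subscriptions block is initialized with invalidate_seq = 2 (even, i.e. idle, per the convention from lines 70/76) and is published to mm->notifier_subscriptions with smp_store_release() only once fully initialized; the kfree() at line 675 is the error path disposing of an unpublished block. A sketch of just that slice, with the surrounding registration logic elided:

        subscriptions = kzalloc(
                sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
        if (!subscriptions)
                return -ENOMEM;

        INIT_HLIST_HEAD(&subscriptions->list);
        spin_lock_init(&subscriptions->lock);
        subscriptions->invalidate_seq = 2;      /* even: idle */
        subscriptions->itree = RB_ROOT_CACHED;
        init_waitqueue_head(&subscriptions->wq);
        INIT_HLIST_HEAD(&subscriptions->deferred_list);

        /* ... take locks, link the new notifier ... */

        /* publish only after every field above is visible to readers */
        if (subscriptions)
                smp_store_release(&mm->notifier_subscriptions, subscriptions);

The release store pairs with the smp_load_acquire() visible at line 982.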
892 struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
928 spin_lock(&subscriptions->lock);
929 if (subscriptions->active_invalidate_ranges) {
930 if (mn_itree_is_invalidating(subscriptions))
932 &subscriptions->deferred_list);
934 subscriptions->invalidate_seq |= 1;
936 &subscriptions->itree);
938 interval_sub->invalidate_seq = subscriptions->invalidate_seq;
940 WARN_ON(mn_itree_is_invalidating(subscriptions));
948 subscriptions->invalidate_seq - 1;
950 &subscriptions->itree);
952 spin_unlock(&subscriptions->lock);
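Lines 892-952 show the locked section of __mmu_interval_notifier_insert(), with three cases under the spinlock: if invalidations are active and the counter is odd, the tree is frozen, so the insert is parked on deferred_list; if active but even, the tree may still change, so the counter is forced odd and the node inserted directly; if idle, the node is inserted and stamped with invalidate_seq - 1, an odd value that can never equal the idle (even) counter. Reassembled around the matched lines; only the hlist_add_head()/interval_tree_insert() call names and the brace structure are filled in:

        spin_lock(&subscriptions->lock);
        if (subscriptions->active_invalidate_ranges) {
                if (mn_itree_is_invalidating(subscriptions))
                        /* tree frozen: defer to mn_itree_inv_end() */
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                else {
                        subscriptions->invalidate_seq |= 1;
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                }
                interval_sub->invalidate_seq = subscriptions->invalidate_seq;
        } else {
                WARN_ON(mn_itree_is_invalidating(subscriptions));
                /* odd, and never equal to the idle (even) counter */
                interval_sub->invalidate_seq =
                        subscriptions->invalidate_seq - 1;
                interval_tree_insert(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);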
977 struct mmu_notifier_subscriptions *subscriptions;
982 subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
983 if (!subscriptions || !subscriptions->has_itree) {
987 subscriptions = mm->notifier_subscriptions;
989 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
999 struct mmu_notifier_subscriptions *subscriptions =
1005 if (!subscriptions || !subscriptions->has_itree) {
1009 subscriptions = mm->notifier_subscriptions;
1011 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
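Lines 977-1011 show the two public insert wrappers. Both ensure the subscriptions block exists with has_itree set, registering a NULL notifier to create it if needed, then call the common helper; the unlocked variant at line 982 uses smp_load_acquire() to pair with the release publish at line 655. A sketch of the unlocked variant; the error handling around mmu_notifier_register() is an assumption:

int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops)
{
        struct mmu_notifier_subscriptions *subscriptions;
        int ret;

        subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
        if (!subscriptions || !subscriptions->has_itree) {
                ret = mmu_notifier_register(NULL, mm);
                if (ret)
                        return ret;
                subscriptions = mm->notifier_subscriptions;
        }
        return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                              start, length, ops);
}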
1017 mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
1022 spin_lock(&subscriptions->lock);
1023 ret = subscriptions->invalidate_seq != seq;
1024 spin_unlock(&subscriptions->lock);
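Lines 1017-1024 show mmu_interval_seq_released() nearly whole: a locked comparison used as the wait_event() condition during removal. Reassembled; only the return type and locals are assumptions:

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
                          unsigned long seq)
{
        bool ret;

        spin_lock(&subscriptions->lock);
        ret = subscriptions->invalidate_seq != seq;
        spin_unlock(&subscriptions->lock);
        return ret;
}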
1041 struct mmu_notifier_subscriptions *subscriptions =
1047 spin_lock(&subscriptions->lock);
1048 if (mn_itree_is_invalidating(subscriptions)) {
1057 &subscriptions->deferred_list);
1058 seq = subscriptions->invalidate_seq;
1063 &subscriptions->itree);
1065 spin_unlock(&subscriptions->lock);
1074 wait_event(subscriptions->wq,
1075 mmu_interval_seq_released(subscriptions, seq));
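Lines 1041-1075 show mmu_interval_notifier_remove(): if the tree is frozen, the removal is parked on deferred_list and the caller sleeps until that generation of the counter is released; otherwise the node is removed directly. A sketch; the RB_EMPTY_NODE case (cancelling an insert still parked on the deferred list) and the trailing mmdrop() are assumptions around the matched lines:

void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
        struct mm_struct *mm = interval_sub->mm;
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;
        unsigned long seq = 0;

        spin_lock(&subscriptions->lock);
        if (mn_itree_is_invalidating(subscriptions)) {
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
                        /* cancel an insert still parked on the deferred list */
                        hlist_del(&interval_sub->deferred_item);
                } else {
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                        seq = subscriptions->invalidate_seq;
                }
        } else {
                WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
                interval_tree_remove(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);

        /* sleep until mn_itree_inv_end() has processed the deferral */
        if (seq)
                wait_event(subscriptions->wq,
                           mmu_interval_seq_released(subscriptions, seq));

        /* pairs with the mmgrab() taken at insert time */
        mmdrop(mm);
}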