// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount; however, this one allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens, then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with, then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *       mn_itree_inv_start():            mmu_interval_read_begin():
	 *                                        spin_lock
	 *                                         seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                         seq == subs->invalidate_seq
	 *                                        spin_unlock
	 *        spin_lock
	 *         seq = ++subscriptions->invalidate_seq
	 *        spin_unlock
	 *        op->invalidate():
	 *          user_lock
	 *           mmu_interval_set_seq()
	 *            interval_sub->invalidate_seq = seq
	 *          user_unlock
	 *
	 *                     [Required: mmu_interval_read_retry() == true]
	 *
	 *        mn_itree_inv_end():
	 *         spin_lock
	 *          seq = ++subscriptions->invalidate_seq
	 *         spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
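
/*
 * An illustrative sketch of the intended read side (not part of this file;
 * names such as "my_mirror" and "my_fault" are hypothetical driver code).
 * A driver mirroring a VA range into device page tables uses a driver mutex
 * as the 'user_lock' and restarts until no collision occurred:
 *
 *	struct my_mirror {
 *		struct mmu_interval_notifier notifier;
 *		struct mutex lock;	// the 'user_lock'
 *	};
 *
 *	static int my_fault(struct my_mirror *m)
 *	{
 *		unsigned long seq;
 *
 *	again:
 *		seq = mmu_interval_read_begin(&m->notifier);
 *
 *		// Collect the new PTE values. This may sleep, fault
 *		// pages in, take mmap_lock, etc.
 *
 *		mutex_lock(&m->lock);
 *		if (mmu_interval_read_retry(&m->notifier, seq)) {
 *			mutex_unlock(&m->lock);
 *			goto again;	// collided, start over
 *		}
 *		// No collision: establish the SPTEs under the lock.
 *		mutex_unlock(&m->lock);
 *		return 0;
 *	}
 *
 * The matching ops->invalidate() callback takes m->lock, calls
 * mmu_interval_set_seq() with the cur_seq it was handed, and tears down
 * the SPTEs before unlocking.
 */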

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock, in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN;
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here. If there are multiple
		 * notifiers and one or more failed their start, any whose
		 * start succeeded are expecting their end to be called. Do
		 * so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
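
/*
 * For reference, a minimal sketch (not from this file) of how a caller in
 * core mm brackets a PTE change with the start/end pair above. This is
 * hedged: the exact mmu_notifier_range_init() signature has changed across
 * kernel versions, so treat it as illustrative only:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... clear/modify the PTEs covering [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 *
 * The wrappers in <linux/mmu_notifier.h> skip the calls entirely when
 * mm_has_notifiers() is false, so unsubscribed mms pay almost nothing.
 */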

void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->arch_invalidate_secondary_tlbs)
			subscription->ops->arch_invalidate_secondary_tlbs(
				subscription, mm,
				start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL subscription signals that the notifier is being
 * registered for itree mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Subsystems should only register for arch_invalidate_secondary_tlbs()
	 * or invalidate_range_start()/end() callbacks, not both.
	 */
	if (WARN_ON_ONCE(subscription &&
			 (subscription->ops->arch_invalidate_secondary_tlbs &&
			  (subscription->ops->invalidate_range_start ||
			   subscription->ops->invalidate_range_end))))
		return -EINVAL;

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a mmu_notifier get, the subscription->mm pointer
 * will remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
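
/*
 * An illustrative sketch of list-style registration (hypothetical driver
 * code; "my_ops", "my_ctx", "my_release" and friends are made-up names).
 * Only the callbacks a subsystem actually needs should be filled in, and
 * the struct mmu_notifier is normally embedded in a driver object so the
 * callbacks can recover it with container_of():
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	my_ctx->notifier.ops = &my_ops;
 *	// current->mm, or an mm pinned with get_task_mm():
 *	ret = mmu_notifier_register(&my_ctx->notifier, current->mm);
 */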

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller holds a mmu_notifier get, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback; however, the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
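
/*
 * An illustrative sketch of the get/put flow (hypothetical names; assumes
 * the mmu_notifier_get() wrapper from <linux/mmu_notifier.h>, which takes
 * the mmap_lock and calls mmu_notifier_get_locked()). A driver that wants
 * one shared notifier per mm supplies alloc/free hooks:
 *
 *	struct my_ctx {
 *		struct mmu_notifier notifier;
 *		// ... driver state ...
 *	};
 *
 *	static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return ERR_PTR(-ENOMEM);
 *		return &ctx->notifier;
 *	}
 *
 *	static void my_free_notifier(struct mmu_notifier *subscription)
 *	{
 *		kfree(container_of(subscription, struct my_ctx, notifier));
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.alloc_notifier	= my_alloc_notifier,
 *		.free_notifier	= my_free_notifier,
 *	};
 *
 *	subscription = mmu_notifier_get(&my_ops, current->mm);
 *	// ... use it, then drop the reference:
 *	mmu_notifier_put(subscription);
 */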

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility of livelock; instead, defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should
	 * be odd; see mmu_interval_read_begin().
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the subscription may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
			  unsigned long seq)
{
	bool ret;

	spin_lock(&subscriptions->lock);
	ret = subscriptions->invalidate_seq != seq;
	spin_unlock(&subscriptions->lock);
	return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
104399cb252fSJason Gunthorpe /**
104499cb252fSJason Gunthorpe * mmu_interval_notifier_remove - Remove an interval notifier
10455292e24aSJason Gunthorpe * @interval_sub: Interval subscription to unregister
104699cb252fSJason Gunthorpe *
104799cb252fSJason Gunthorpe * This function must be paired with mmu_interval_notifier_insert(). It cannot
104899cb252fSJason Gunthorpe * be called from any ops callback.
104999cb252fSJason Gunthorpe *
105099cb252fSJason Gunthorpe * Once this returns, ops callbacks are no longer running on other CPUs and
105199cb252fSJason Gunthorpe * will not be called in the future.
105299cb252fSJason Gunthorpe */
10535292e24aSJason Gunthorpe void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
105499cb252fSJason Gunthorpe {
10555292e24aSJason Gunthorpe struct mm_struct *mm = interval_sub->mm;
1056984cfe4eSJason Gunthorpe struct mmu_notifier_subscriptions *subscriptions =
1057984cfe4eSJason Gunthorpe mm->notifier_subscriptions;
105899cb252fSJason Gunthorpe unsigned long seq = 0;
105999cb252fSJason Gunthorpe
106099cb252fSJason Gunthorpe might_sleep();
106199cb252fSJason Gunthorpe
1062984cfe4eSJason Gunthorpe spin_lock(&subscriptions->lock);
1063984cfe4eSJason Gunthorpe if (mn_itree_is_invalidating(subscriptions)) {
106499cb252fSJason Gunthorpe /*
106599cb252fSJason Gunthorpe * remove is being called after insert already put this
106699cb252fSJason Gunthorpe * subscription on the deferred list, but before the deferred
106799cb252fSJason Gunthorpe * list was processed.
106899cb252fSJason Gunthorpe */
10685292e24aSJason Gunthorpe if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
10695292e24aSJason Gunthorpe hlist_del(&interval_sub->deferred_item);
107099cb252fSJason Gunthorpe } else {
10715292e24aSJason Gunthorpe hlist_add_head(&interval_sub->deferred_item,
1072984cfe4eSJason Gunthorpe &subscriptions->deferred_list);
1073984cfe4eSJason Gunthorpe seq = subscriptions->invalidate_seq;
107499cb252fSJason Gunthorpe }
107599cb252fSJason Gunthorpe } else {
10765292e24aSJason Gunthorpe WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
10775292e24aSJason Gunthorpe interval_tree_remove(&interval_sub->interval_tree,
1078984cfe4eSJason Gunthorpe &subscriptions->itree);
107999cb252fSJason Gunthorpe }
1080984cfe4eSJason Gunthorpe spin_unlock(&subscriptions->lock);
108199cb252fSJason Gunthorpe
108299cb252fSJason Gunthorpe /*
108399cb252fSJason Gunthorpe * The possible sleep waiting for invalidation progress requires that
108499cb252fSJason Gunthorpe * the caller not hold any locks taken by invalidation callbacks.
108599cb252fSJason Gunthorpe */
108699cb252fSJason Gunthorpe lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
108799cb252fSJason Gunthorpe lock_map_release(&__mmu_notifier_invalidate_range_start_map);
108899cb252fSJason Gunthorpe if (seq)
1089984cfe4eSJason Gunthorpe wait_event(subscriptions->wq,
109031956166SAlistair Popple mmu_interval_seq_released(subscriptions, seq));
109199cb252fSJason Gunthorpe
109299cb252fSJason Gunthorpe /* pairs with mmgrab in mmu_interval_notifier_insert() */
109399cb252fSJason Gunthorpe mmdrop(mm);
109499cb252fSJason Gunthorpe }
109599cb252fSJason Gunthorpe EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
109699cb252fSJason Gunthorpe
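/*
 * Rough sketch of the ops side that mmu_interval_notifier_remove() must not
 * be called from: an ->invalidate callback updates the subscription's seq
 * under the driver's own lock and tears down its mappings. my_invalidate and
 * driver_lock()/driver_unlock() are hypothetical names;
 * mmu_interval_set_seq() and mmu_notifier_range_blockable() are real
 * helpers:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *		driver_lock();
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		... unmap driver SPTEs in [range->start, range->end) ...
 *		driver_unlock();
 *		return true;
 *	}
 *
 * Setting the seq under the same lock used by the read side is what makes a
 * concurrent mmu_interval_read_retry() observe the collision.
 */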
10972c7933f5SJason Gunthorpe /**
10982c7933f5SJason Gunthorpe * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
10992c7933f5SJason Gunthorpe *
11002c7933f5SJason Gunthorpe * This function ensures that all outstanding async SRCU work from
11012c7933f5SJason Gunthorpe * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
11022c7933f5SJason Gunthorpe * associated with an unused mmu_notifier will no longer be called.
11032c7933f5SJason Gunthorpe *
11042c7933f5SJason Gunthorpe * Before calling this, the caller must ensure that all of its mmu_notifiers
11052c7933f5SJason Gunthorpe * have been fully released via mmu_notifier_put().
11062c7933f5SJason Gunthorpe *
11072c7933f5SJason Gunthorpe * Modules using the mmu_notifier_put() API should call this in their __exit
11082c7933f5SJason Gunthorpe * function to avoid module unloading races.
11092c7933f5SJason Gunthorpe */
11102c7933f5SJason Gunthorpe void mmu_notifier_synchronize(void)
11112c7933f5SJason Gunthorpe {
11122c7933f5SJason Gunthorpe synchronize_srcu(&srcu);
11132c7933f5SJason Gunthorpe }
11142c7933f5SJason Gunthorpe EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
1115
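/*
 * Rough module-teardown sketch, following the rule stated above: a module
 * that manages notifier lifetime with mmu_notifier_put() drains the async
 * SRCU release work in its __exit routine so no ops callback can run after
 * the module text is unloaded. my_driver_exit and
 * my_release_all_notifiers() are hypothetical names:
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		my_release_all_notifiers();	(each via mmu_notifier_put())
 *		mmu_notifier_synchronize();
 *	}
 *	module_exit(my_driver_exit);
 */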