// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008 Qumranet, Inc.
 *  Copyright (C) 2008 SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount; however, this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mni monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *mni,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&mni->interval_tree, range->start,
				       range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *mni;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens, then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(mni, next, &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
			interval_tree_insert(&mni->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&mni->interval_tree,
					     &subscriptions->itree);
		hlist_del(&mni->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @mni: The range to use
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under mni. If
 * the mm invokes invalidation during the critical section then
 * mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mni->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the mni has a different seq value under the user_lock than we
	 * started with, then it has collided.
	 *
	 * If the mni currently has the same seq value as the subscriptions
	 * seq, then it is currently between invalidate_start/end and is
	 * colliding.
	 *
	 * The locking looks broadly like this:
	 *    mn_tree_invalidate_start():           mmu_interval_read_begin():
	 *                                          spin_lock
	 *                                           seq = READ_ONCE(mni->invalidate_seq);
	 *                                           seq == subs->invalidate_seq
	 *                                          spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *    op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         mni->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                     [Required: mmu_interval_read_retry() == true]
	 *
	 *    mn_itree_inv_end():
	 *     spin_lock
	 *      seq = ++subscriptions->invalidate_seq
	 *     spin_unlock
	 *
	 *                                          user_lock
	 *                                           mmu_interval_read_retry():
	 *                                            mni->invalidate_seq != seq
	 *                                          user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(mni->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * mni->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range().  This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
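
/*
 * A minimal read-side usage sketch for the collision-retry scheme above
 * ("driver", its "update_lock" mutex and its embedded mni are hypothetical
 * names used only for illustration; the 'user_lock' is whatever lock the
 * driver uses to serialize SPTE programming):
 *
 *	again:
 *		seq = mmu_interval_read_begin(&driver->mni);
 *
 *		// ... collect the new PTE values, may fault or sleep ...
 *
 *		mutex_lock(&driver->update_lock);
 *		if (mmu_interval_read_retry(&driver->mni, seq)) {
 *			mutex_unlock(&driver->update_lock);
 *			goto again;
 *		}
 *		// ... establish the SPTEs from the collected values ...
 *		mutex_unlock(&driver->update_lock);
 *
 * The matching ops->invalidate callback must take the same lock and call
 * mmu_interval_set_seq() before tearing down SPTEs in the range.
 */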

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *mni;
	unsigned long cur_seq;
	bool ret;

	for (mni = mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     mni; mni = mn_itree_inv_next(mni, &range)) {
		ret = mni->ops->invalidate(mni, &range, cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &subscriptions->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		mn = hlist_entry(subscriptions->list.first, struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list,
				 hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *mni;
	unsigned long cur_seq;

	for (mni = mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     mni; mni = mn_itree_inv_next(mni, range)) {
		bool ret;

		ret = mni->ops->invalidate(mni, range, cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &subscriptions->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = mn->ops->invalidate_range_start(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &subscriptions->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this will be no additional
		 * overhead (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			mn->ops->invalidate_range_end(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
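
/*
 * For orientation: core mm code reaches the hooks above through the inline
 * wrappers in <linux/mmu_notifier.h>. A minimal sketch of the usual
 * bracketing around a PTE-modifying operation (the event type, vma and
 * address range below are illustrative placeholders only):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... modify the PTEs under the page table locks ...
 *	mmu_notifier_invalidate_range_end(&range);
 */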

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_sem.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers.  acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_sem or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (mn) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		mn->mm = mm;
		mn->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&mn->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @mn: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a mmu_notifier get, the mn->mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(mn, &mm->notifier_subscriptions->list,
				 hlist) {
		if (mn->ops != ops)
			continue;

		if (likely(mn->users != UINT_MAX))
			mn->users++;
		else
			mn = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return mn;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller holds a mmu_notifier get, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->notifier_subscriptions) {
		mn = find_get_mmu_notifier(mm, ops);
		if (mn)
			return mn;
	}

	mn = ops->alloc_notifier(mm);
	if (IS_ERR(mn))
		return mn;
	mn->ops = ops;
	ret = __mmu_notifier_register(mn, mm);
	if (ret)
		goto out_free;
	return mn;
out_free:
	mn->ops->free_notifier(mn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = mn->mm;

	mn->ops->free_notifier(mn);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @mn: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *mn)
{
	struct mm_struct *mm = mn->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!mn->users) || --mn->users)
		goto out_unlock;
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
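
/*
 * A minimal lifetime sketch for the get/put flow, assuming a hypothetical
 * driver context object ("driver_ctx") that embeds the struct mmu_notifier
 * returned by its alloc_notifier() callback and a hypothetical ops table
 * "driver_mn_ops":
 *
 *	mn = mmu_notifier_get(&driver_mn_ops, current->mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	ctx = container_of(mn, struct driver_ctx, notifier);
 *
 *	// ... when the last user of ctx goes away and all SPTEs are gone:
 *	mmu_notifier_put(mn);
 *
 *	// ... and in the module's __exit function:
 *	mmu_notifier_synchronize();
 */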

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *mni, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	mni->mm = mm;
	mni->ops = ops;
	RB_CLEAR_NODE(&mni->interval_tree.rb);
	mni->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1, &mni->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility of live lock; instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the mni->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&mni->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&mni->interval_tree,
					     &subscriptions->itree);
		}
		mni->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a mni not under invalidation should be
		 * odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		mni->invalidate_seq = subscriptions->invalidate_seq - 1;
		interval_tree_insert(&mni->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @mni: Interval notifier to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_sem);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(mni, mm, subscriptions, start,
					      length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *mni, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(mni, mm, subscriptions, start,
					      length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
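
/*
 * A minimal sketch of the invalidation side expected from users of the
 * interval notifier API, reusing the hypothetical "driver->update_lock"
 * from the read-side sketch near mmu_interval_read_begin():
 *
 *	static bool driver_invalidate(struct mmu_interval_notifier *mni,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *		mutex_lock(&driver->update_lock);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		// ... tear down SPTEs covering range->start .. range->end ...
 *		mutex_unlock(&driver->update_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops driver_mni_ops = {
 *		.invalidate = driver_invalidate,
 *	};
 *
 *	mmu_interval_notifier_insert(&driver->mni, mm, start, length,
 *				     &driver_mni_ops);
 */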

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @mni: Interval notifier to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
{
	struct mm_struct *mm = mni->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
			hlist_del(&mni->deferred_item);
		} else {
			hlist_add_head(&mni->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb));
		interval_tree_remove(&mni->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires that
	 * the caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before calling this, the caller must ensure that all of its mmu_notifiers
 * have been fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);