/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to RCU, and it serializes
 * against the other mmu notifiers with RCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&mn->hlist);
		/*
		 * RCU here will block mmu_notifier_unregister until
		 * ->release returns.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * If ->release runs before mmu_notifier_unregister it
		 * must be handled, as it's the only way for the driver
		 * to flush all existing sptes and stop the driver
		 * from establishing any more sptes before all the
		 * pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_rcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed to free all
	 * pages in the mm) until the ->release method returns, in
	 * case it was invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count is held by exit_mmap.
	 */
	synchronize_rcu();
}
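
/*
 * Illustrative sketch, not compiled: roughly what a driver's ->release
 * method might look like. "struct example_notifier" and
 * example_flush_all_sptes() are hypothetical names, not part of this
 * file or of any real driver; the point is only that ->release must
 * flush all existing sptes and stop new ones from being established,
 * as the comments above explain.
 */
#if 0
struct example_notifier {
	struct mmu_notifier mn;	/* embedded, recovered via container_of */
	/* ... driver-private secondary-MMU state ... */
};

static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct example_notifier *en;

	en = container_of(mn, struct example_notifier, mn);
	/* tear down every spte and refuse to establish new ones */
	example_flush_all_sptes(en);
}
#endif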

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	rcu_read_unlock();

	return young;
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	rcu_read_unlock();
}
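
/*
 * Illustrative sketch, not compiled: one possible ->clear_flush_young
 * implementation for hardware with no young bitflag, following the
 * comment above __mmu_notifier_clear_flush_young. example_unmap_spte()
 * is a hypothetical helper that removes the secondary mapping at
 * "address" and returns 1 if one existed, 0 otherwise.
 */
#if 0
static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long address)
{
	struct example_notifier *en;

	en = container_of(mn, struct example_notifier, mn);
	/* no hardware young bit: unmap and report prior existence */
	return example_unmap_spte(en, address);
}
#endif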

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
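
/*
 * Illustrative sketch, not compiled: registering against current->mm
 * under the rules documented above mmu_notifier_register. No VM lock
 * is held here, and current->mm implies an mm_users pin, so this can't
 * race with mmu_notifier_release. example_ops is a hypothetical
 * mmu_notifier_ops instance wired to the example callbacks above.
 */
#if 0
static const struct mmu_notifier_ops example_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
};

static int example_attach(struct example_notifier *en)
{
	en->mn.ops = &example_ops;
	return mmu_notifier_register(&en->mn, current->mm);
}
#endif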

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_unregister
 * with the unregister lock + RCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		hlist_del_rcu(&mn->hlist);

		/*
		 * RCU here will force exit_mmap to wait for ->release
		 * to finish before freeing the pages.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release
	 * if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_rcu();

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
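
/*
 * Illustrative sketch, not compiled: teardown matching example_attach
 * above. Per the comment above mmu_notifier_unregister, all sptes are
 * dropped first; unregister then releases the mm_count pin that
 * mmu_notifier_register took, whether exit_mmap already ran or not.
 */
#if 0
static void example_detach(struct example_notifier *en, struct mm_struct *mm)
{
	example_flush_all_sptes(en);
	mmu_notifier_unregister(&en->mn, mm);
	/* after this point no notifier method of "en" can run anymore */
}
#endif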