/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * This function can't run concurrently with mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel even though no task is using this mm any more, through
 * the vmas outside of the exit_mmap context, such as with vmtruncate.
 * This serializes against mmu_notifier_unregister with the
 * mmu_notifier_mm->lock in addition to RCU, and against the other mmu
 * notifiers with RCU alone. struct mmu_notifier_mm can't go away out
 * from under us because exit_mmap itself holds an mm_count pin.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&mn->hlist);
		/*
		 * RCU here will block mmu_notifier_unregister until
		 * ->release returns.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * If ->release runs before mmu_notifier_unregister,
		 * it must be invoked here: it's the only way for the
		 * driver to flush all existing sptes and to stop
		 * establishing any more sptes before all the pages in
		 * the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_rcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed to free all the
	 * pages in the mm) until the ->release method returns, in
	 * case it was invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count pin is held by exit_mmap.
	 */
	synchronize_rcu();
}

/*
 * If the hardware doesn't support a young/accessed bitflag,
 * ->clear_flush_young can instead unmap the address and return 1 or 0
 * depending on whether the mapping previously existed.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	rcu_read_unlock();

	return young;
}
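
/*
 * Illustrative sketch, not part of this file: a driver whose
 * secondary MMU has no hardware young/accessed bit could implement
 * ->clear_flush_young by zapping its spte and reporting whether one
 * existed, as the comment above describes. All example_* identifiers
 * are hypothetical.
 */
#if 0
static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long address)
{
	struct example_dev *dev = container_of(mn, struct example_dev, mn);
	int young;

	/* "Young" if a secondary mapping existed before we zap it. */
	young = example_spte_present(dev, address);
	example_zap_spte(dev, address);
	return young;
}
#endif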

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't implement ->change_pte, so fall
		 * back to ->invalidate_page in that case.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	rcu_read_unlock();
}

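/*
 * Illustrative sketch, not part of this file: a driver wires its
 * callbacks up through a struct mmu_notifier_ops table. Hooks left
 * NULL are simply skipped by the dispatch loops above; in particular,
 * a driver without ->change_pte is still notified through
 * ->invalidate_page, as __mmu_notifier_change_pte shows. All
 * example_* identifiers are hypothetical.
 */
#if 0
static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
	.change_pte		= example_change_pte,
	.invalidate_page	= example_invalidate_page,
	.invalidate_range_start	= example_invalidate_range_start,
	.invalidate_range_end	= example_invalidate_range_end,
};
#endif
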
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either,
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM-related lock when calling
 * this registration function. The caller must also ensure that
 * mm_users can't go down to zero while this runs, to avoid races with
 * mmu_notifier_release, so mm has to be current->mm or the mm must be
 * pinned safely, such as with get_task_mm(). If the mm is not
 * current->mm, the mm_users pin should be released by calling mmput
 * after mmu_notifier_register returns. mmu_notifier_unregister must
 * always be called to unregister the notifier. mm_count is
 * automatically pinned to allow mmu_notifier_unregister to safely run
 * at any time later, before or after exit_mmap. ->release will always
 * be called before exit_mmap frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
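
/*
 * Illustrative sketch, not part of this file: registering against
 * another task's mm. get_task_mm() keeps mm_users elevated so
 * mmu_notifier_release can't run while we register; the pin is
 * dropped with mmput() once mmu_notifier_register has returned, as
 * the comment above prescribes. All example_* identifiers are
 * hypothetical.
 */
#if 0
static int example_attach(struct example_dev *dev, struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	int ret;

	if (!mm)
		return -ESRCH;
	dev->mn.ops = &example_mmu_notifier_ops;
	ret = mmu_notifier_register(&dev->mn, mm);
	/* On success mm_count is now pinned; the mm_users pin can go. */
	mmput(mm);
	return ret;
}
#endif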

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
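
/*
 * Illustrative sketch, not part of this file: __mmu_notifier_register
 * suits callers that already hold mmap_sem for writing, e.g. to
 * register while setting up other per-mm state inside the same
 * critical section. All example_* identifiers are hypothetical.
 */
#if 0
static int example_attach_locked(struct example_dev *dev,
				 struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(&dev->mn, mm);
	/* ... other setup that requires mmap_sem held for writing ... */
	up_write(&mm->mmap_sem);
	return ret;
}
#endif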

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_release
 * with the mmu_notifier_mm->lock in addition to RCU. All sptes must
 * be dropped before calling mmu_notifier_unregister. ->release or any
 * other notifier method may be invoked concurrently with
 * mmu_notifier_unregister, and only after mmu_notifier_unregister has
 * returned are we guaranteed that ->release or any other method can't
 * run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		hlist_del_rcu(&mn->hlist);

		/*
		 * RCU here will force exit_mmap to wait for ->release
		 * to finish before freeing the pages.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release
	 * if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_rcu();

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
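
/*
 * Illustrative sketch, not part of this file: teardown order. The
 * driver must drop its own sptes before unregistering; once
 * mmu_notifier_unregister returns, no notifier method, including
 * ->release, can still be running, so the notifier may be freed.
 * All example_* identifiers are hypothetical.
 */
#if 0
static void example_detach(struct example_dev *dev, struct mm_struct *mm)
{
	example_zap_all_sptes(dev);
	mmu_notifier_unregister(&dev->mn, mm);
	/* No callback can still be running; dev and dev->mn may go. */
	kfree(dev);
}
#endif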