// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@gentwo.org>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "vma.h"

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount. However, this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree. This is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
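
/*
 * Illustrative timeline (not part of the original file) showing how the
 * seq/counter protocol above composes when two invalidation ranges with an
 * interested mmu_interval_notifier overlap in time. The concrete values are
 * only an example, assuming the idle seq starts at 2:
 *
 *	idle:		invalidate_seq == 2 (even), active_invalidate_ranges == 0
 *	range A start:	active = 1, itree hit -> seq |= 1   => seq == 3 (odd)
 *	range B start:	active = 2, seq stays odd (3)
 *	range A end:	active = 1, nothing else to do
 *	range B end:	active = 0 and seq is odd -> seq++  => seq == 4 (even),
 *			deferred itree adds/removes applied, waiters woken
 *
 * A reader that sampled interval_sub->invalidate_seq == 3 while either range
 * was active sees itself colliding until the final end bumps the counter.
 */
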
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_itree_inv_start():                 mmu_interval_read_begin():
	 *                                          spin_lock
	 *                                           seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                           seq == subs->invalidate_seq
	 *                                          spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                          user_lock
	 *                                           mmu_interval_read_retry():
	 *                                            interval_sub->invalidate_seq != seq
	 *                                          user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
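
/*
 * Illustrative driver-side sketch (not part of the original file) of the
 * collision-retry pattern mmu_interval_read_begin() is designed for. The
 * example_mirror, mirror_lock and populate/program helpers are hypothetical;
 * mirror_lock plays the role of the 'user_lock' described above and must be
 * the same lock the ops->invalidate() callback takes before calling
 * mmu_interval_set_seq():
 *
 *	struct example_mirror {
 *		struct mmu_interval_notifier notifier;
 *		struct mutex mirror_lock;	// the 'user_lock'
 *	};
 *
 *	static int example_fault(struct example_mirror *m, unsigned long addr)
 *	{
 *		unsigned long seq;
 *
 *	again:
 *		seq = mmu_interval_read_begin(&m->notifier);
 *		// May fault/sleep to get the pages backing 'addr'.
 *		example_populate(m, addr);
 *
 *		mutex_lock(&m->mirror_lock);
 *		if (mmu_interval_read_retry(&m->notifier, seq)) {
 *			mutex_unlock(&m->mirror_lock);
 *			goto again;
 *		}
 *		// Safe to establish SPTEs under mirror_lock.
 *		example_program_sptes(m, addr);
 *		mutex_unlock(&m->mirror_lock);
 *		return 0;
 *	}
 */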

static void mn_itree_finish_pass(struct llist_head *finish_passes)
{
	struct llist_node *first = llist_reverse_order(__llist_del_all(finish_passes));
	struct mmu_interval_notifier_finish *f, *next;

	llist_for_each_entry_safe(f, next, first, link)
		f->notifier->ops->invalidate_finish(f);
}

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	LLIST_HEAD(finish_passes);
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		if (interval_sub->ops->invalidate_start) {
			struct mmu_interval_notifier_finish *finish = NULL;

			ret = interval_sub->ops->invalidate_start(interval_sub,
								  &range,
								  cur_seq,
								  &finish);
			if (ret && finish) {
				finish->notifier = interval_sub;
				__llist_add(&finish->link, &finish_passes);
			}
		} else {
			ret = interval_sub->ops->invalidate(interval_sub,
							    &range, cur_seq);
		}
		WARN_ON(!ret);
	}

	mn_itree_finish_pass(&finish_passes);
	mn_itree_inv_end(subscriptions);
}
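
/*
 * Illustrative two-phase sketch (not part of the original file) of how a
 * subscriber might use the optional invalidate_start()/invalidate_finish()
 * pair dispatched above instead of a single ->invalidate(). All example_*
 * names are hypothetical; the core only guarantees that every finish object
 * queued by a start callback has ->invalidate_finish() called after all
 * start callbacks for the range have run:
 *
 *	struct example_sub {
 *		struct mmu_interval_notifier notifier;
 *		struct mmu_interval_notifier_finish finish;
 *		spinlock_t lock;
 *	};
 *
 *	static bool example_invalidate_start(struct mmu_interval_notifier *sub,
 *				const struct mmu_notifier_range *range,
 *				unsigned long cur_seq,
 *				struct mmu_interval_notifier_finish **finish)
 *	{
 *		struct example_sub *e =
 *			container_of(sub, struct example_sub, notifier);
 *
 *		spin_lock(&e->lock);
 *		mmu_interval_set_seq(sub, cur_seq);
 *		// Phase 1: unhook SPTEs, accumulate flush work.
 *		spin_unlock(&e->lock);
 *		*finish = &e->finish;		// request a phase-2 callback
 *		return true;
 *	}
 *
 *	static void example_invalidate_finish(struct mmu_interval_notifier_finish *f)
 *	{
 *		struct example_sub *e =
 *			container_of(f, struct example_sub, finish);
 *
 *		// Phase 2: issue the accumulated flush once, after all starts.
 *		example_flush_secondary_tlb(e);
 *	}
 */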

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription, &subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
bool __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	bool young = false;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription,
				  &mm->notifier_subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

bool __mmu_notifier_clear_young(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	bool young = false;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription,
				  &mm->notifier_subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

bool __mmu_notifier_test_young(struct mm_struct *mm,
			       unsigned long address)
{
	struct mmu_notifier *subscription;
	bool young = false;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription,
				  &mm->notifier_subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
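
/*
 * Illustrative sketch (not part of the original file) of the fallback the
 * comment above __mmu_notifier_clear_flush_young() describes: a secondary
 * MMU with no hardware accessed bit can implement ->clear_flush_young() by
 * zapping its mapping and reporting whether one existed. example_* names are
 * hypothetical:
 *
 *	static int example_clear_flush_young(struct mmu_notifier *subscription,
 *					     struct mm_struct *mm,
 *					     unsigned long start,
 *					     unsigned long end)
 *	{
 *		// Return 1 if a secondary mapping existed (and was zapped),
 *		// which the core folds into the "young" result, else 0.
 *		return example_zap_secondary_range(subscription, start, end);
 *	}
 */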

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	LLIST_HEAD(finish_passes);
	unsigned long cur_seq;
	int err = 0;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		if (interval_sub->ops->invalidate_start) {
			struct mmu_interval_notifier_finish *finish = NULL;

			ret = interval_sub->ops->invalidate_start(interval_sub,
								  range,
								  cur_seq,
								  &finish);
			if (ret && finish) {
				finish->notifier = interval_sub;
				__llist_add(&finish->link, &finish_passes);
			}
		} else {
			ret = interval_sub->ops->invalidate(interval_sub,
							    range, cur_seq);
		}
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			err = -EAGAIN;
			break;
		}
	}

	mn_itree_finish_pass(&finish_passes);

	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end(), so the interval tree's start must be
	 * balanced here.
	 */
	if (err)
		mn_itree_inv_end(subscriptions);

	return err;
}
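
/*
 * Illustrative caller-side sketch (not part of the original file) of the
 * non-blocking contract enforced above: a caller that does not set
 * MMU_NOTIFIER_RANGE_BLOCKABLE must be prepared for -EAGAIN and must not
 * call invalidate_range_end() in that case. The example_reap() name and the
 * way the range is built here are hypothetical:
 *
 *	static bool example_reap(struct mm_struct *mm, unsigned long start,
 *				 unsigned long end)
 *	{
 *		struct mmu_notifier_range range = {
 *			.event = MMU_NOTIFY_UNMAP,
 *			.mm = mm,
 *			.start = start,
 *			.end = end,
 *			// .flags deliberately lacks MMU_NOTIFIER_RANGE_BLOCKABLE
 *		};
 *
 *		if (__mmu_notifier_invalidate_range_start(&range))
 *			return false;	// -EAGAIN: bail out, no _end() call
 *		// ... unmap the range without sleeping ...
 *		__mmu_notifier_invalidate_range_end(&range);
 *		return true;
 *	}
 */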

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription, &subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN,
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here. If there are multiple
		 * notifiers and one or more failed start, any whose start
		 * succeeded expect their end to be called. Do so now.
		 */
		hlist_for_each_entry_srcu(subscription, &subscriptions->list,
					  hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription, &subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_srcu(subscription,
				  &mm->notifier_subscriptions->list, hlist,
				  srcu_read_lock_held(&srcu)) {
		if (subscription->ops->arch_invalidate_secondary_tlbs)
			subscription->ops->arch_invalidate_secondary_tlbs(
				subscription, mm,
				start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Subsystems should only register for invalidate_secondary_tlbs() or
	 * invalidate_range_start()/end() callbacks, not both.
	 */
	if (WARN_ON_ONCE(subscription &&
			 (subscription->ops->arch_invalidate_secondary_tlbs &&
			  (subscription->ops->invalidate_range_start ||
			   subscription->ops->invalidate_range_end))))
		return -EINVAL;

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
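
/*
 * Illustrative sketch (not part of the original file) of a minimal
 * list-style subscriber that would be attached with mmu_notifier_register()
 * below. All example_* names are hypothetical; the ops shown are the classic
 * release + invalidate_range_start/end trio dispatched by the mn_hlist_*
 * helpers above:
 *
 *	static void example_release(struct mmu_notifier *subscription,
 *				    struct mm_struct *mm)
 *	{
 *		// Last chance to tear down all SPTEs before the mm's pages
 *		// are freed.
 *	}
 *
 *	static int example_range_start(struct mmu_notifier *subscription,
 *				       const struct mmu_notifier_range *range)
 *	{
 *		// A start method that may return -EAGAIN for non-blockable
 *		// ranges must not also provide invalidate_range_end (see the
 *		// WARN_ON in mn_hlist_invalidate_range_start()). This one
 *		// always succeeds, so the start/end pair is allowed.
 *		example_block_and_zap(range->mm, range->start, range->end);
 *		return 0;
 *	}
 *
 *	static void example_range_end(struct mmu_notifier *subscription,
 *				      const struct mmu_notifier_range *range)
 *	{
 *		// Allow SPTEs to be established again.
 *	}
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		.release		= example_release,
 *		.invalidate_range_start	= example_range_start,
 *		.invalidate_range_end	= example_range_end,
 *	};
 */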

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
 * unregister the notifier.
 *
 * While the caller holds a mmu_notifier get, the subscription->mm pointer
 * will remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_srcu(subscription,
				  &mm->notifier_subscriptions->list, hlist,
				  lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get, the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock; instead, defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}
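
/*
 * Illustrative sketch (not part of the original file) of the ops->invalidate()
 * side that pairs with the insert helpers below and with
 * mmu_interval_read_begin()/mmu_interval_read_retry(). example_* names and
 * mirror_lock are hypothetical; mirror_lock is the 'user_lock' from the
 * mmu_interval_read_begin() documentation:
 *
 *	static bool example_invalidate(struct mmu_interval_notifier *sub,
 *				       const struct mmu_notifier_range *range,
 *				       unsigned long cur_seq)
 *	{
 *		struct example_mirror *m =
 *			container_of(sub, struct example_mirror, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&m->mirror_lock);
 *		else if (!mutex_trylock(&m->mirror_lock))
 *			return false;	// only legal for non-blockable ranges
 *
 *		mmu_interval_set_seq(sub, cur_seq);
 *		// Tear down SPTEs covering [range->start, range->end).
 *		mutex_unlock(&m->mirror_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops example_interval_ops = {
 *		.invalidate = example_invalidate,
 *	};
 */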

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the interval subscription may not be present in the interval
 * tree yet. The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	WARN_ON_ONCE(ops->invalidate_start && !ops->invalidate_finish);
	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
			  unsigned long seq)
{
	bool ret;

	spin_lock(&subscriptions->lock);
	ret = subscriptions->invalidate_seq != seq;
	spin_unlock(&subscriptions->lock);
	return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert placed this
		 * subscription on the deferred list, but before the deferred
		 * list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   mmu_interval_seq_released(subscriptions, seq));

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
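
/*
 * Illustrative sketch (not part of the original file) of the module-exit
 * ordering required by mmu_notifier_put() above. example_* names are
 * hypothetical; the point is that mmu_notifier_synchronize() runs after all
 * puts, so the ops structure cannot be called once the module text is gone:
 *
 *	static void __exit example_exit(void)
 *	{
 *		example_put_all_notifiers();	// mmu_notifier_put() on each
 *		mmu_notifier_synchronize();	// wait for free_notifier() work
 *	}
 *	module_exit(example_exit);
 */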