Matching lines from linux/mm/mmu_notifier.c (excerpt; source line numbers and enclosing functions retained)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/mm/mmu_notifier.c
13 #include <linux/mm.h>
19 #include <linux/sched/mm.h>
35 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
40 /* all mmu notifiers registered in this mm are queued in this list */
53 * This is a collision-retry read-side/write-side 'lock', a lot like a
54 * seqcount, however this allows multiple write-sides to hold it at
56 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
59 * Note that the core mm creates nested invalidate_range_start()/end() regions
62 * progress on the mm side.
69 * - mm->active_invalidate_ranges != 0
70 * - subscriptions->invalidate_seq & 1 == True (odd)
71 * - some range on the mm_struct is being invalidated
72 * - the itree is not allowed to change
75 * - mm->active_invalidate_ranges != 0
76 * - subscriptions->invalidate_seq & 1 == False (even)
77 * - some range on the mm_struct is being invalidated
78 * - the itree is allowed to change
80 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
91 lockdep_assert_held(&subscriptions->lock); in mn_itree_is_invalidating()
92 return subscriptions->invalidate_seq & 1; in mn_itree_is_invalidating()
103 spin_lock(&subscriptions->lock); in mn_itree_inv_start_range()
104 subscriptions->active_invalidate_ranges++; in mn_itree_inv_start_range()
105 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
106 range->end - 1); in mn_itree_inv_start_range()
108 subscriptions->invalidate_seq |= 1; in mn_itree_inv_start_range()
113 *seq = subscriptions->invalidate_seq; in mn_itree_inv_start_range()
114 spin_unlock(&subscriptions->lock); in mn_itree_inv_start_range()
124 node = interval_tree_iter_next(&interval_sub->interval_tree, in mn_itree_inv_next()
125 range->start, range->end - 1); in mn_itree_inv_next()
136 spin_lock(&subscriptions->lock); in mn_itree_inv_end()
137 if (--subscriptions->active_invalidate_ranges || in mn_itree_inv_end()
139 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
144 subscriptions->invalidate_seq++; in mn_itree_inv_end()
153 &subscriptions->deferred_list, in mn_itree_inv_end()
155 if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) in mn_itree_inv_end()
156 interval_tree_insert(&interval_sub->interval_tree, in mn_itree_inv_end()
157 &subscriptions->itree); in mn_itree_inv_end()
159 interval_tree_remove(&interval_sub->interval_tree, in mn_itree_inv_end()
160 &subscriptions->itree); in mn_itree_inv_end()
161 hlist_del(&interval_sub->deferred_item); in mn_itree_inv_end()
163 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
165 wake_up_all(&subscriptions->wq); in mn_itree_inv_end()
169 * mmu_interval_read_begin - Begin a read side critical section against a VA
174 * collision-retry scheme similar to seqcount for the VA range under
175 * subscription. If the mm invokes invalidation during the critical section
191 interval_sub->mm->notifier_subscriptions; in mmu_interval_read_begin()
206 * seq = READ_ONCE(interval_sub->invalidate_seq); in mmu_interval_read_begin()
207 * seq == subs->invalidate_seq in mmu_interval_read_begin()
210 * seq = ++subscriptions->invalidate_seq in mmu_interval_read_begin()
212 * op->invalidate(): in mmu_interval_read_begin()
215 * interval_sub->invalidate_seq = seq in mmu_interval_read_begin()
222 * seq = ++subscriptions->invalidate_seq in mmu_interval_read_begin()
227 * interval_sub->invalidate_seq != seq in mmu_interval_read_begin()
234 spin_lock(&subscriptions->lock); in mmu_interval_read_begin()
236 seq = READ_ONCE(interval_sub->invalidate_seq); in mmu_interval_read_begin()
237 is_invalidating = seq == subscriptions->invalidate_seq; in mmu_interval_read_begin()
238 spin_unlock(&subscriptions->lock); in mmu_interval_read_begin()
241 * interval_sub->invalidate_seq must always be set to an odd value via in mmu_interval_read_begin()
245 * subscriptions->invalidate_seq is even in the idle state. in mmu_interval_read_begin()
250 wait_event(subscriptions->wq, in mmu_interval_read_begin()
251 READ_ONCE(subscriptions->invalidate_seq) != seq); in mmu_interval_read_begin()
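The collision-retry scheme above is consumed by drivers as a read-begin/retry loop. A minimal sketch of that loop, assuming a hypothetical driver context p that embeds a struct mmu_interval_notifier (interval_sub), its own mutex (drv_lock), and hypothetical helpers for faulting and programming device PTEs:

	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&p->interval_sub);
	/* Snapshot CPU page table state outside the driver lock,
	 * e.g. via hmm_range_fault(). */
	drv_fault_range(p);

	mutex_lock(&p->drv_lock);
	if (mmu_interval_read_retry(&p->interval_sub, seq)) {
		/* An invalidation ran concurrently; retry with a new seq. */
		mutex_unlock(&p->drv_lock);
		goto again;
	}
	/* The snapshot is still valid; program the device (shadow) PTEs. */
	drv_program_device_ptes(p);
	mutex_unlock(&p->drv_lock);

The invalidate callback side of this handshake must call mmu_interval_set_seq() under the same driver lock, which is what makes the retry check above race-free.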
264 struct mm_struct *mm) in mn_itree_release() argument
269 .mm = mm, in mn_itree_release()
270 .start = 0, in mn_itree_release()
281 ret = interval_sub->ops->invalidate(interval_sub, &range, in mn_itree_release()
291 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
292 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
293 * in parallel despite there being no task using this mm any more,
296 * the notifier_subscriptions->lock in addition to SRCU and it serializes
302 struct mm_struct *mm) in mn_hlist_release() argument
309 * ->release returns. in mn_hlist_release()
312 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_release()
315 * If ->release runs before mmu_notifier_unregister it must be in mn_hlist_release()
318 * sptes before all the pages in the mm are freed. in mn_hlist_release()
320 if (subscription->ops->release) in mn_hlist_release()
321 subscription->ops->release(subscription, mm); in mn_hlist_release()
323 spin_lock(&subscriptions->lock); in mn_hlist_release()
324 while (unlikely(!hlist_empty(&subscriptions->list))) { in mn_hlist_release()
325 subscription = hlist_entry(subscriptions->list.first, in mn_hlist_release()
330 * for ->release to finish and for mmu_notifier_unregister to in mn_hlist_release()
333 hlist_del_init_rcu(&subscription->hlist); in mn_hlist_release()
335 spin_unlock(&subscriptions->lock); in mn_hlist_release()
340 * exit_mmap (which would proceed with freeing all pages in the mm) in mn_hlist_release()
341 * until the ->release method returns, if it was invoked by in mn_hlist_release()
350 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release() argument
353 mm->notifier_subscriptions; in __mmu_notifier_release()
355 if (subscriptions->has_itree) in __mmu_notifier_release()
356 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
358 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_release()
359 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
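As the comments above stress, ->release must tear down all secondary mappings before exit_mmap() frees the pages. A minimal sketch of such a callback, assuming a hypothetical drv_ctx that embeds the struct mmu_notifier and a hypothetical drv_zap_all_mappings() helper:

static void drv_release(struct mmu_notifier *subscription,
			struct mm_struct *mm)
{
	struct drv_ctx *ctx = container_of(subscription, struct drv_ctx,
					   notifier);

	/* After this returns, exit_mmap() may free the underlying pages,
	 * so no device access through old SPTEs may remain. */
	drv_zap_all_mappings(ctx);
}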
363 * If no young bitflag is supported by the hardware, ->clear_flush_young can
364 * unmap the address and return 1 or 0 depending on whether the mapping previously
367 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young() argument
372 int young = 0, id; in __mmu_notifier_clear_flush_young()
376 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_flush_young()
378 if (subscription->ops->clear_flush_young) in __mmu_notifier_clear_flush_young()
379 young |= subscription->ops->clear_flush_young( in __mmu_notifier_clear_flush_young()
380 subscription, mm, start, end); in __mmu_notifier_clear_flush_young()
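A minimal sketch of the fallback the comment above permits, for secondary MMUs without an accessed/young bit; drv_zap_range() is a hypothetical helper that returns whether any mapping existed in the range:

static int drv_clear_flush_young(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct drv_ctx *ctx = container_of(subscription, struct drv_ctx,
					   notifier);

	/* No hardware young bit: unmap and report "young" if anything
	 * was mapped, forcing a re-fault on the next device access. */
	return drv_zap_range(ctx, start, end) ? 1 : 0;
}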
387 int __mmu_notifier_clear_young(struct mm_struct *mm, in __mmu_notifier_clear_young() argument
392 int young = 0, id; in __mmu_notifier_clear_young()
396 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_young()
398 if (subscription->ops->clear_young) in __mmu_notifier_clear_young()
399 young |= subscription->ops->clear_young(subscription, in __mmu_notifier_clear_young()
400 mm, start, end); in __mmu_notifier_clear_young()
407 int __mmu_notifier_test_young(struct mm_struct *mm, in __mmu_notifier_test_young() argument
411 int young = 0, id; in __mmu_notifier_test_young()
415 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_test_young()
417 if (subscription->ops->test_young) { in __mmu_notifier_test_young()
418 young = subscription->ops->test_young(subscription, mm, in __mmu_notifier_test_young()
441 ret = interval_sub->ops->invalidate(interval_sub, range, in mn_itree_invalidate()
449 return 0; in mn_itree_invalidate()
453 * On -EAGAIN the non-blocking caller is not allowed to call in mn_itree_invalidate()
457 return -EAGAIN; in mn_itree_invalidate()
465 int ret = 0; in mn_hlist_invalidate_range_start()
469 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_range_start()
471 const struct mmu_notifier_ops *ops = subscription->ops; in mn_hlist_invalidate_range_start()
473 if (ops->invalidate_range_start) { in mn_hlist_invalidate_range_start()
478 _ret = ops->invalidate_range_start(subscription, range); in mn_hlist_invalidate_range_start()
483 ops->invalidate_range_start, _ret, in mn_hlist_invalidate_range_start()
485 "non-" : in mn_hlist_invalidate_range_start()
488 _ret != -EAGAIN); in mn_hlist_invalidate_range_start()
495 WARN_ON(ops->invalidate_range_end); in mn_hlist_invalidate_range_start()
503 * Must be non-blocking to get here. If there are multiple in mn_hlist_invalidate_range_start()
507 hlist_for_each_entry_rcu(subscription, &subscriptions->list, in mn_hlist_invalidate_range_start()
509 if (!subscription->ops->invalidate_range_end) in mn_hlist_invalidate_range_start()
512 subscription->ops->invalidate_range_end(subscription, in mn_hlist_invalidate_range_start()
524 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_start()
527 if (subscriptions->has_itree) { in __mmu_notifier_invalidate_range_start()
532 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_start()
534 return 0; in __mmu_notifier_invalidate_range_start()
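The -EAGAIN rule above applies only to non-blockable ranges. A sketch of an hlist-style invalidate_range_start callback that honours it, assuming a hypothetical drv_ctx with its own mutex and the zap helper from the earlier sketch:

static int drv_invalidate_range_start(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range)
{
	struct drv_ctx *ctx = container_of(subscription, struct drv_ctx,
					   notifier);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&ctx->lock);
	else if (!mutex_trylock(&ctx->lock))
		return -EAGAIN;	/* only legal for non-blockable ranges */

	drv_zap_range(ctx, range->start, range->end);
	mutex_unlock(&ctx->lock);
	return 0;
}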
545 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_end()
547 if (subscription->ops->invalidate_range_end) { in mn_hlist_invalidate_end()
550 subscription->ops->invalidate_range_end(subscription, in mn_hlist_invalidate_end()
562 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_end()
565 if (subscriptions->has_itree) in __mmu_notifier_invalidate_range_end()
568 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_end()
573 void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, in __mmu_notifier_arch_invalidate_secondary_tlbs() argument
581 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_arch_invalidate_secondary_tlbs()
583 if (subscription->ops->arch_invalidate_secondary_tlbs) in __mmu_notifier_arch_invalidate_secondary_tlbs()
584 subscription->ops->arch_invalidate_secondary_tlbs( in __mmu_notifier_arch_invalidate_secondary_tlbs()
585 subscription, mm, in __mmu_notifier_arch_invalidate_secondary_tlbs()
597 struct mm_struct *mm) in __mmu_notifier_register() argument
602 mmap_assert_write_locked(mm); in __mmu_notifier_register()
603 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
610 (subscription->ops->arch_invalidate_secondary_tlbs && in __mmu_notifier_register()
611 (subscription->ops->invalidate_range_start || in __mmu_notifier_register()
612 subscription->ops->invalidate_range_end)))) in __mmu_notifier_register()
613 return -EINVAL; in __mmu_notifier_register()
615 if (!mm->notifier_subscriptions) { in __mmu_notifier_register()
618 * know that mm->notifier_subscriptions can't change while we in __mmu_notifier_register()
624 return -ENOMEM; in __mmu_notifier_register()
626 INIT_HLIST_HEAD(&subscriptions->list); in __mmu_notifier_register()
627 spin_lock_init(&subscriptions->lock); in __mmu_notifier_register()
628 subscriptions->invalidate_seq = 2; in __mmu_notifier_register()
629 subscriptions->itree = RB_ROOT_CACHED; in __mmu_notifier_register()
630 init_waitqueue_head(&subscriptions->wq); in __mmu_notifier_register()
631 INIT_HLIST_HEAD(&subscriptions->deferred_list); in __mmu_notifier_register()
634 ret = mm_take_all_locks(mm); in __mmu_notifier_register()
642 * current->mm or explicitly with get_task_mm() or similar). in __mmu_notifier_register()
650 * mmu_notifier_subscriptions is not freed until the mm is destroyed. in __mmu_notifier_register()
655 smp_store_release(&mm->notifier_subscriptions, subscriptions); in __mmu_notifier_register()
659 mmgrab(mm); in __mmu_notifier_register()
660 subscription->mm = mm; in __mmu_notifier_register()
661 subscription->users = 1; in __mmu_notifier_register()
663 spin_lock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
664 hlist_add_head_rcu(&subscription->hlist, in __mmu_notifier_register()
665 &mm->notifier_subscriptions->list); in __mmu_notifier_register()
666 spin_unlock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
668 mm->notifier_subscriptions->has_itree = true; in __mmu_notifier_register()
670 mm_drop_all_locks(mm); in __mmu_notifier_register()
671 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
672 return 0; in __mmu_notifier_register()
681 * mmu_notifier_register - Register a notifier on a mm
683 * @mm: The mm to attach the notifier to
688 * so mm has to be current->mm or the mm should be pinned safely such
689 * as with get_task_mm(). If the mm is not current->mm, the mm_users
696 * While the caller has a mmu_notifier get, the subscription->mm pointer will remain
697 * valid, and can be converted to an active mm pointer via mmget_not_zero().
700 struct mm_struct *mm) in mmu_notifier_register() argument
704 mmap_write_lock(mm); in mmu_notifier_register()
705 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_register()
706 mmap_write_unlock(mm); in mmu_notifier_register()
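A sketch of the classic register/unregister flow described in the kerneldoc above, assuming a hypothetical drv_ctx embedding the struct mmu_notifier and the hypothetical callbacks sketched earlier:

static const struct mmu_notifier_ops drv_notifier_ops = {
	.release		= drv_release,
	.clear_flush_young	= drv_clear_flush_young,
	.invalidate_range_start	= drv_invalidate_range_start,
};

static int drv_attach(struct drv_ctx *ctx)
{
	ctx->notifier.ops = &drv_notifier_ops;
	/* Takes mmap_lock internally; mm must be current->mm or pinned. */
	return mmu_notifier_register(&ctx->notifier, current->mm);
}

static void drv_detach(struct drv_ctx *ctx)
{
	/* After this returns no callback is running or will run again. */
	mmu_notifier_unregister(&ctx->notifier, ctx->notifier.mm);
}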
712 find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops) in find_get_mmu_notifier() argument
716 spin_lock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
718 &mm->notifier_subscriptions->list, hlist, in find_get_mmu_notifier()
719 lockdep_is_held(&mm->notifier_subscriptions->lock)) { in find_get_mmu_notifier()
720 if (subscription->ops != ops) in find_get_mmu_notifier()
723 if (likely(subscription->users != UINT_MAX)) in find_get_mmu_notifier()
724 subscription->users++; in find_get_mmu_notifier()
726 subscription = ERR_PTR(-EOVERFLOW); in find_get_mmu_notifier()
727 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
730 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
735 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
736 * the mm & ops
738 * @mm: The mm to attach notifiers to
741 * ops->alloc_notifier(), or returns an already existing notifier on the
746 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
748 * While the caller has a mmu_notifier get, the mm pointer will remain valid,
749 * and can be converted to an active mm pointer via mmget_not_zero().
752 struct mm_struct *mm) in mmu_notifier_get_locked() argument
757 mmap_assert_write_locked(mm); in mmu_notifier_get_locked()
759 if (mm->notifier_subscriptions) { in mmu_notifier_get_locked()
760 subscription = find_get_mmu_notifier(mm, ops); in mmu_notifier_get_locked()
765 subscription = ops->alloc_notifier(mm); in mmu_notifier_get_locked()
768 subscription->ops = ops; in mmu_notifier_get_locked()
769 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_get_locked()
774 subscription->ops->free_notifier(subscription); in mmu_notifier_get_locked()
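A sketch of the get/put allocation model described in the kerneldoc above, assuming a hypothetical drv_ctx embedding the notifier; the single notifier per (mm, ops) pair is allocated on demand via ops->alloc_notifier():

static struct mmu_notifier *drv_alloc_notifier(struct mm_struct *mm)
{
	struct drv_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return &ctx->notifier;
}

static void drv_free_notifier(struct mmu_notifier *subscription)
{
	kfree(container_of(subscription, struct drv_ctx, notifier));
}

static const struct mmu_notifier_ops drv_get_ops = {
	.alloc_notifier	= drv_alloc_notifier,
	.free_notifier	= drv_free_notifier,
	.release	= drv_release,
};

/* Caller holds mm->mmap_lock for write (or uses mmu_notifier_get()). */
static struct drv_ctx *drv_ctx_get(struct mm_struct *mm)
{
	struct mmu_notifier *subscription;

	subscription = mmu_notifier_get_locked(&drv_get_ops, mm);
	if (IS_ERR(subscription))
		return ERR_CAST(subscription);
	return container_of(subscription, struct drv_ctx, notifier);
}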
780 void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm) in __mmu_notifier_subscriptions_destroy() argument
782 BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list)); in __mmu_notifier_subscriptions_destroy()
783 kfree(mm->notifier_subscriptions); in __mmu_notifier_subscriptions_destroy()
784 mm->notifier_subscriptions = LIST_POISON1; /* debug */ in __mmu_notifier_subscriptions_destroy()
788 * This releases the mm_count pin automatically and frees the mm
792 * calling mmu_notifier_unregister. ->release or any other notifier
795 * that ->release or any other method can't run anymore.
798 struct mm_struct *mm) in mmu_notifier_unregister() argument
800 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
802 if (!hlist_unhashed(&subscription->hlist)) { in mmu_notifier_unregister()
804 * SRCU here will force exit_mmap to wait for ->release to in mmu_notifier_unregister()
812 * that ->release is called before freeing the pages. in mmu_notifier_unregister()
814 if (subscription->ops->release) in mmu_notifier_unregister()
815 subscription->ops->release(subscription, mm); in mmu_notifier_unregister()
818 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
823 hlist_del_init_rcu(&subscription->hlist); in mmu_notifier_unregister()
824 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
829 * ->release if it was run by mmu_notifier_release instead of us. in mmu_notifier_unregister()
833 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
835 mmdrop(mm); in mmu_notifier_unregister()
843 struct mm_struct *mm = subscription->mm; in mmu_notifier_free_rcu() local
845 subscription->ops->free_notifier(subscription); in mmu_notifier_free_rcu()
847 mmdrop(mm); in mmu_notifier_free_rcu()
851 * mmu_notifier_put - Release the reference on the notifier
858 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
862 * As ops->release is not guaranteed to be called, the user must ensure that
866 * This function can be called from the ops->release callback, however the
874 struct mm_struct *mm = subscription->mm; in mmu_notifier_put() local
876 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
877 if (WARN_ON(!subscription->users) || --subscription->users) in mmu_notifier_put()
879 hlist_del_init_rcu(&subscription->hlist); in mmu_notifier_put()
880 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
882 call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu); in mmu_notifier_put()
886 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
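The matching teardown for the sketch above; note the embedding structure must stay allocated until ops->free_notifier() runs, which happens later via SRCU:

static void drv_ctx_put(struct drv_ctx *ctx)
{
	/* Drops one reference; on the last put the notifier is unhooked
	 * and drv_free_notifier() frees ctx after an SRCU grace period. */
	mmu_notifier_put(&ctx->notifier);
}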
891 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in __mmu_interval_notifier_insert() argument
895 interval_sub->mm = mm; in __mmu_interval_notifier_insert()
896 interval_sub->ops = ops; in __mmu_interval_notifier_insert()
897 RB_CLEAR_NODE(&interval_sub->interval_tree.rb); in __mmu_interval_notifier_insert()
898 interval_sub->interval_tree.start = start; in __mmu_interval_notifier_insert()
903 if (length == 0 || in __mmu_interval_notifier_insert()
904 check_add_overflow(start, length - 1, in __mmu_interval_notifier_insert()
905 &interval_sub->interval_tree.last)) in __mmu_interval_notifier_insert()
906 return -EOVERFLOW; in __mmu_interval_notifier_insert()
909 if (WARN_ON(atomic_read(&mm->mm_users) <= 0)) in __mmu_interval_notifier_insert()
910 return -EINVAL; in __mmu_interval_notifier_insert()
913 mmgrab(mm); in __mmu_interval_notifier_insert()
925 * In all cases the value for the interval_sub->invalidate_seq should be in __mmu_interval_notifier_insert()
928 spin_lock(&subscriptions->lock); in __mmu_interval_notifier_insert()
929 if (subscriptions->active_invalidate_ranges) { in __mmu_interval_notifier_insert()
931 hlist_add_head(&interval_sub->deferred_item, in __mmu_interval_notifier_insert()
932 &subscriptions->deferred_list); in __mmu_interval_notifier_insert()
934 subscriptions->invalidate_seq |= 1; in __mmu_interval_notifier_insert()
935 interval_tree_insert(&interval_sub->interval_tree, in __mmu_interval_notifier_insert()
936 &subscriptions->itree); in __mmu_interval_notifier_insert()
938 interval_sub->invalidate_seq = subscriptions->invalidate_seq; in __mmu_interval_notifier_insert()
947 interval_sub->invalidate_seq = in __mmu_interval_notifier_insert()
948 subscriptions->invalidate_seq - 1; in __mmu_interval_notifier_insert()
949 interval_tree_insert(&interval_sub->interval_tree, in __mmu_interval_notifier_insert()
950 &subscriptions->itree); in __mmu_interval_notifier_insert()
952 spin_unlock(&subscriptions->lock); in __mmu_interval_notifier_insert()
953 return 0; in __mmu_interval_notifier_insert()
957 * mmu_interval_notifier_insert - Insert an interval notifier
961 * @mm: mm_struct to attach to
965 * mm. Upon return the ops related to mmu_interval_notifier will be called
973 struct mm_struct *mm, unsigned long start, in mmu_interval_notifier_insert() argument
980 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
982 subscriptions = smp_load_acquire(&mm->notifier_subscriptions); in mmu_interval_notifier_insert()
983 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert()
984 ret = mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert()
987 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert()
989 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert()
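A sketch of inserting an interval notifier and its invalidate callback, assuming a hypothetical drv_buf that embeds the struct mmu_interval_notifier, a driver mutex, and a hypothetical drv_zap_device_range() helper:

static bool drv_interval_invalidate(struct mmu_interval_notifier *interval_sub,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq)
{
	struct drv_buf *buf = container_of(interval_sub, struct drv_buf,
					   interval_sub);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&buf->drv_lock);
	else if (!mutex_trylock(&buf->drv_lock))
		return false;

	/* Pairs with mmu_interval_read_retry() in the fault path. */
	mmu_interval_set_seq(interval_sub, cur_seq);
	drv_zap_device_range(buf, range->start, range->end);
	mutex_unlock(&buf->drv_lock);
	return true;
}

static const struct mmu_interval_notifier_ops drv_interval_ops = {
	.invalidate = drv_interval_invalidate,
};

static int drv_buf_track(struct drv_buf *buf, unsigned long start,
			 unsigned long length)
{
	/* May sleep and take mmap_lock; not callable from the ops above. */
	return mmu_interval_notifier_insert(&buf->interval_sub, current->mm,
					    start, length, &drv_interval_ops);
}

Teardown is mmu_interval_notifier_remove(&buf->interval_sub), which can block until any in-flight invalidation covering the range completes.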
995 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in mmu_interval_notifier_insert_locked() argument
1000 mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1003 mmap_assert_write_locked(mm); in mmu_interval_notifier_insert_locked()
1005 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert_locked()
1006 ret = __mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert_locked()
1009 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1011 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert_locked()
1022 spin_lock(&subscriptions->lock); in mmu_interval_seq_released()
1023 ret = subscriptions->invalidate_seq != seq; in mmu_interval_seq_released()
1024 spin_unlock(&subscriptions->lock); in mmu_interval_seq_released()
1029 * mmu_interval_notifier_remove - Remove an interval notifier
1040 struct mm_struct *mm = interval_sub->mm; in mmu_interval_notifier_remove() local
1042 mm->notifier_subscriptions; in mmu_interval_notifier_remove()
1043 unsigned long seq = 0; in mmu_interval_notifier_remove()
1047 spin_lock(&subscriptions->lock); in mmu_interval_notifier_remove()
1053 if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) { in mmu_interval_notifier_remove()
1054 hlist_del(&interval_sub->deferred_item); in mmu_interval_notifier_remove()
1056 hlist_add_head(&interval_sub->deferred_item, in mmu_interval_notifier_remove()
1057 &subscriptions->deferred_list); in mmu_interval_notifier_remove()
1058 seq = subscriptions->invalidate_seq; in mmu_interval_notifier_remove()
1061 WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb)); in mmu_interval_notifier_remove()
1062 interval_tree_remove(&interval_sub->interval_tree, in mmu_interval_notifier_remove()
1063 &subscriptions->itree); in mmu_interval_notifier_remove()
1065 spin_unlock(&subscriptions->lock); in mmu_interval_notifier_remove()
1074 wait_event(subscriptions->wq, in mmu_interval_notifier_remove()
1078 mmdrop(mm); in mmu_interval_notifier_remove()
1083 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed