Lines Matching full:pages

70  * allocation can hold about 26M of 4k pages and 13G of 2M pages in an
164 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_add_npinned() argument
168 rc = check_add_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_add_npinned()
170 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_add_npinned()
173 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_sub_npinned() argument
177 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_sub_npinned()
179 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_sub_npinned()
182 static void iopt_pages_err_unpin(struct iopt_pages *pages, in iopt_pages_err_unpin() argument
190 iopt_pages_sub_npinned(pages, npages); in iopt_pages_err_unpin()
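
The matches above for iopt_pages_add_npinned()/iopt_pages_sub_npinned() show the counter-guarding pattern: adjust the pinned-page count with check_add_overflow()/check_sub_overflow() and then WARN_ON() both a wraparound and a pinned count exceeding the total page count. Below is a minimal userspace sketch of the same pattern, assuming the GCC/Clang __builtin_*_overflow builtins that the kernel helpers wrap; the struct and function names here are illustrative, not the kernel's.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for the pinned/total page counters. */
	struct pin_counter {
		size_t npinned;	/* pages currently pinned */
		size_t npages;	/* total pages backing the object */
	};

	/* Detect wraparound on the update, then sanity-check that the pinned
	 * count never exceeds the total page count (WARN_ON() in the kernel). */
	static void pin_add(struct pin_counter *c, size_t npages)
	{
		int overflow = __builtin_add_overflow(c->npinned, npages, &c->npinned);

		assert(!overflow && c->npinned <= c->npages);
	}

	static void pin_sub(struct pin_counter *c, size_t npages)
	{
		int underflow = __builtin_sub_overflow(c->npinned, npages, &c->npinned);

		assert(!underflow && c->npinned <= c->npages);
	}

	int main(void)
	{
		struct pin_counter c = { .npinned = 0, .npages = 16 };

		pin_add(&c, 10);
		pin_sub(&c, 4);
		printf("pinned %zu of %zu\n", c.npinned, c.npages); /* pinned 6 of 16 */
		return 0;
	}
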
196 * covers a portion of the first and last pages in the range.
250 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages, in iopt_pages_find_domain_area() argument
255 node = interval_tree_iter_first(&pages->domains_itree, index, index); in iopt_pages_find_domain_area()
591 unsigned long last_index, struct page **pages) in pages_to_xarray() argument
593 struct page **end_pages = pages + (last_index - start_index) + 1; in pages_to_xarray()
594 struct page **half_pages = pages + (end_pages - pages) / 2; in pages_to_xarray()
601 while (pages != end_pages) { in pages_to_xarray()
603 if (pages == half_pages && iommufd_should_fail()) { in pages_to_xarray()
611 old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); in pages_to_xarray()
615 pages++; in pages_to_xarray()
630 static void batch_from_pages(struct pfn_batch *batch, struct page **pages, in batch_from_pages() argument
633 struct page **end = pages + npages; in batch_from_pages()
635 for (; pages != end; pages++) in batch_from_pages()
636 if (!batch_add_pfn(batch, page_to_pfn(*pages))) in batch_from_pages()
675 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, in batch_unpin() argument
693 to_unpin, pages->writable); in batch_unpin()
694 iopt_pages_sub_npinned(pages, to_unpin); in batch_unpin()
766 struct iopt_pages *pages) in pfn_reader_user_init() argument
774 if (pages->writable) in pfn_reader_user_init()
777 user->file = (pages->type == IOPT_ADDRESS_FILE) ? pages->file : NULL; in pfn_reader_user_init()
785 struct iopt_pages *pages) in pfn_reader_user_destroy() argument
789 mmap_read_unlock(pages->source_mm); in pfn_reader_user_destroy()
790 if (!user->file && pages->source_mm != current->mm) in pfn_reader_user_destroy()
791 mmput(pages->source_mm); in pfn_reader_user_destroy()
854 struct iopt_pages *pages, in pfn_reader_user_pin() argument
858 bool remote_mm = pages->source_mm != current->mm; in pfn_reader_user_pin()
887 * providing the pages, so we can optimize into in pfn_reader_user_pin()
891 if (!mmget_not_zero(pages->source_mm)) in pfn_reader_user_pin()
905 start = pages->start + (start_index * PAGE_SIZE); in pfn_reader_user_pin()
908 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); in pfn_reader_user_pin()
912 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); in pfn_reader_user_pin()
914 mmap_read_lock(pages->source_mm); in pfn_reader_user_pin()
917 rc = pin_user_pages_remote(pages->source_mm, uptr, npages, in pfn_reader_user_pin()
926 iopt_pages_add_npinned(pages, rc); in pfn_reader_user_pin()
933 static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in incr_user_locked_vm() argument
939 lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> in incr_user_locked_vm()
942 cur_pages = atomic_long_read(&pages->source_user->locked_vm); in incr_user_locked_vm()
947 } while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm, in incr_user_locked_vm()
952 static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in decr_user_locked_vm() argument
954 if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages)) in decr_user_locked_vm()
956 atomic_long_sub(npages, &pages->source_user->locked_vm); in decr_user_locked_vm()
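
The incr_user_locked_vm() matches show pinned pages being charged against the user's RLIMIT_MEMLOCK with a lock-free read/check/compare-exchange loop, while decr_user_locked_vm() uncharges with a plain atomic subtract. The following is a hedged userspace sketch of that loop using C11 atomics; the limit value and names are invented for illustration and the kernel's error return (-ENOMEM) is reduced to a bool.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative per-user accounting; in the kernel this corresponds to
	 * source_user->locked_vm and task_rlimit(..., RLIMIT_MEMLOCK). */
	static atomic_long locked_vm;
	static const long lock_limit = 1024;	/* pretend MEMLOCK limit, in pages */

	static bool charge_locked_vm(long npages)
	{
		long cur = atomic_load(&locked_vm);
		long next;

		do {
			next = cur + npages;
			if (next > lock_limit)
				return false;	/* over the limit: fail the pin */
			/* On CAS failure, cur is reloaded and the check reruns. */
		} while (!atomic_compare_exchange_weak(&locked_vm, &cur, next));
		return true;
	}

	static void uncharge_locked_vm(long npages)
	{
		atomic_fetch_sub(&locked_vm, npages);
	}

	int main(void)
	{
		if (charge_locked_vm(512))
			printf("charged, locked_vm=%ld\n", atomic_load(&locked_vm));
		uncharge_locked_vm(512);
		return 0;
	}
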
960 static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, in update_mm_locked_vm() argument
967 mmap_read_unlock(pages->source_mm); in update_mm_locked_vm()
972 pages->source_mm != current->mm) { in update_mm_locked_vm()
973 if (!mmget_not_zero(pages->source_mm)) in update_mm_locked_vm()
978 mmap_write_lock(pages->source_mm); in update_mm_locked_vm()
979 rc = __account_locked_vm(pages->source_mm, npages, inc, in update_mm_locked_vm()
980 pages->source_task, false); in update_mm_locked_vm()
981 mmap_write_unlock(pages->source_mm); in update_mm_locked_vm()
984 mmput(pages->source_mm); in update_mm_locked_vm()
988 int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages, in iopt_pages_update_pinned() argument
993 switch (pages->account_mode) { in iopt_pages_update_pinned()
998 rc = incr_user_locked_vm(pages, npages); in iopt_pages_update_pinned()
1000 decr_user_locked_vm(pages, npages); in iopt_pages_update_pinned()
1003 rc = update_mm_locked_vm(pages, npages, inc, user); in iopt_pages_update_pinned()
1009 pages->last_npinned = pages->npinned; in iopt_pages_update_pinned()
1011 atomic64_add(npages, &pages->source_mm->pinned_vm); in iopt_pages_update_pinned()
1013 atomic64_sub(npages, &pages->source_mm->pinned_vm); in iopt_pages_update_pinned()
1017 static void update_unpinned(struct iopt_pages *pages) in update_unpinned() argument
1019 if (WARN_ON(pages->npinned > pages->last_npinned)) in update_unpinned()
1021 if (pages->npinned == pages->last_npinned) in update_unpinned()
1023 iopt_pages_update_pinned(pages, pages->last_npinned - pages->npinned, in update_unpinned()
1028  * Changes in the number of pages pinned are done after the pages have been read
1031 * how many pages we have already pinned within a range to generate an accurate
1035 struct iopt_pages *pages) in pfn_reader_user_update_pinned() argument
1040 lockdep_assert_held(&pages->mutex); in pfn_reader_user_update_pinned()
1042 if (pages->npinned == pages->last_npinned) in pfn_reader_user_update_pinned()
1045 if (pages->npinned < pages->last_npinned) { in pfn_reader_user_update_pinned()
1046 npages = pages->last_npinned - pages->npinned; in pfn_reader_user_update_pinned()
1051 npages = pages->npinned - pages->last_npinned; in pfn_reader_user_update_pinned()
1054 return iopt_pages_update_pinned(pages, npages, inc, user); in pfn_reader_user_update_pinned()
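
The comment and code around pfn_reader_user_update_pinned() describe accounting by delta: compare npinned against the value recorded at the last accounting pass (last_npinned), charge or uncharge only the difference, and record the new value. A small sketch of that bookkeeping follows; the names are hypothetical and the charge step is stubbed out with a printf.

	#include <stdbool.h>
	#include <stdio.h>

	struct pin_state {
		unsigned long npinned;		/* current pin count */
		unsigned long last_npinned;	/* pin count at the last accounting pass */
	};

	/* Stand-in for the charge/uncharge step (iopt_pages_update_pinned()). */
	static int account_pinned(struct pin_state *s, unsigned long npages, bool inc)
	{
		printf("%s %lu pages\n", inc ? "charge" : "uncharge", npages);
		s->last_npinned = s->npinned;	/* remember what has been accounted */
		return 0;
	}

	/* Mirror of the update-by-delta pattern. */
	static int update_pinned(struct pin_state *s)
	{
		if (s->npinned == s->last_npinned)
			return 0;		/* nothing changed since the last pass */
		if (s->npinned < s->last_npinned)
			return account_pinned(s, s->last_npinned - s->npinned, false);
		return account_pinned(s, s->npinned - s->last_npinned, true);
	}

	int main(void)
	{
		struct pin_state s = { .npinned = 0, .last_npinned = 0 };

		s.npinned += 8;		/* pinned 8 more pages since the last pass */
		update_pinned(&s);	/* charge 8 */
		s.npinned -= 3;		/* some pages were unpinned */
		update_pinned(&s);	/* uncharge 3 */
		return 0;
	}
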
1062 * - The original PFN source, ie pages->source_mm
1068 struct iopt_pages *pages; member
1080 return pfn_reader_user_update_pinned(&pfns->user, pfns->pages); in pfn_reader_update_pinned()
1084 * The batch can contain a mixture of pages that are still in use and pages that
1085 * need to be unpinned. Unpin only pages that are not held anywhere else.
1092 struct iopt_pages *pages = pfns->pages; in pfn_reader_unpin() local
1094 lockdep_assert_held(&pages->mutex); in pfn_reader_unpin()
1096 interval_tree_for_each_double_span(&span, &pages->access_itree, in pfn_reader_unpin()
1097 &pages->domains_itree, start, last) { in pfn_reader_unpin()
1101 batch_unpin(&pfns->batch, pages, span.start_hole - start, in pfn_reader_unpin()
1121 batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, in pfn_reader_fill_span()
1128 * Pull as many pages from the first domain we find in the in pfn_reader_fill_span()
1132 area = iopt_pages_find_domain_area(pfns->pages, start_index); in pfn_reader_fill_span()
1136 /* The storage_domain cannot change without the pages mutex */ in pfn_reader_fill_span()
1144 rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, in pfn_reader_fill_span()
1201 static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_init() argument
1206 lockdep_assert_held(&pages->mutex); in pfn_reader_init()
1208 pfns->pages = pages; in pfn_reader_init()
1212 pfn_reader_user_init(&pfns->user, pages); in pfn_reader_init()
1216 interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, in pfn_reader_init()
1217 &pages->domains_itree, start_index, in pfn_reader_init()
1223 * There are many assertions regarding the state of pages->npinned vs
1224  * pages->last_npinned, for instance something like unmapping a domain must only
1232 struct iopt_pages *pages = pfns->pages; in pfn_reader_release_pins() local
1236 /* Any pages not transferred to the batch are just unpinned */ in pfn_reader_release_pins()
1250 iopt_pages_sub_npinned(pages, npages); in pfn_reader_release_pins()
1261 struct iopt_pages *pages = pfns->pages; in pfn_reader_destroy() local
1264 pfn_reader_user_destroy(&pfns->user, pfns->pages); in pfn_reader_destroy()
1266 WARN_ON(pages->last_npinned != pages->npinned); in pfn_reader_destroy()
1269 static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_first() argument
1278 rc = pfn_reader_init(pfns, pages, start_index, last_index); in pfn_reader_first()
1293 struct iopt_pages *pages; in iopt_alloc_pages() local
1302 pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); in iopt_alloc_pages()
1303 if (!pages) in iopt_alloc_pages()
1306 kref_init(&pages->kref); in iopt_alloc_pages()
1307 xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); in iopt_alloc_pages()
1308 mutex_init(&pages->mutex); in iopt_alloc_pages()
1309 pages->source_mm = current->mm; in iopt_alloc_pages()
1310 mmgrab(pages->source_mm); in iopt_alloc_pages()
1311 pages->npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE); in iopt_alloc_pages()
1312 pages->access_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1313 pages->domains_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1314 pages->writable = writable; in iopt_alloc_pages()
1316 pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; in iopt_alloc_pages()
1318 pages->account_mode = IOPT_PAGES_ACCOUNT_USER; in iopt_alloc_pages()
1319 pages->source_task = current->group_leader; in iopt_alloc_pages()
1321 pages->source_user = get_uid(current_user()); in iopt_alloc_pages()
1322 return pages; in iopt_alloc_pages()
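
iopt_alloc_pages() sizes the pin tracking as DIV_ROUND_UP(length + start_byte, PAGE_SIZE), matching the earlier comment that a range may cover only a portion of its first and last pages. A quick worked sketch of that arithmetic, assuming 4 KiB pages; the variable values are made up for illustration.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE ((uintptr_t)4096)
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* An unaligned user buffer: starts 100 bytes into a page and is
		 * 8000 bytes long, so it only partially covers its first and
		 * last pages. */
		uintptr_t uptr = 0x10000 + 100;
		unsigned long length = 8000;

		uintptr_t uptr_down = uptr & ~(PAGE_SIZE - 1);	/* round down to a page */
		unsigned long start_byte = (unsigned long)(uptr - uptr_down);
		unsigned long npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE);

		/* 100 + 8000 = 8100 bytes -> rounds up to 2 pages, both partial. */
		printf("start_byte=%lu npages=%lu\n", start_byte, npages);
		return 0;
	}
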
1328 struct iopt_pages *pages; in iopt_alloc_user_pages() local
1336 pages = iopt_alloc_pages(uptr - uptr_down, length, writable); in iopt_alloc_user_pages()
1337 if (IS_ERR(pages)) in iopt_alloc_user_pages()
1338 return pages; in iopt_alloc_user_pages()
1339 pages->uptr = uptr_down; in iopt_alloc_user_pages()
1340 pages->type = IOPT_ADDRESS_USER; in iopt_alloc_user_pages()
1341 return pages; in iopt_alloc_user_pages()
1348 struct iopt_pages *pages; in iopt_alloc_file_pages() local
1355 pages = iopt_alloc_pages(start - start_down, length, writable); in iopt_alloc_file_pages()
1356 if (IS_ERR(pages)) in iopt_alloc_file_pages()
1357 return pages; in iopt_alloc_file_pages()
1358 pages->file = get_file(file); in iopt_alloc_file_pages()
1359 pages->start = start_down; in iopt_alloc_file_pages()
1360 pages->type = IOPT_ADDRESS_FILE; in iopt_alloc_file_pages()
1361 return pages; in iopt_alloc_file_pages()
1366 struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); in iopt_release_pages() local
1368 WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); in iopt_release_pages()
1369 WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); in iopt_release_pages()
1370 WARN_ON(pages->npinned); in iopt_release_pages()
1371 WARN_ON(!xa_empty(&pages->pinned_pfns)); in iopt_release_pages()
1372 mmdrop(pages->source_mm); in iopt_release_pages()
1373 mutex_destroy(&pages->mutex); in iopt_release_pages()
1374 put_task_struct(pages->source_task); in iopt_release_pages()
1375 free_uid(pages->source_user); in iopt_release_pages()
1376 if (pages->type == IOPT_ADDRESS_FILE) in iopt_release_pages()
1377 fput(pages->file); in iopt_release_pages()
1378 kfree(pages); in iopt_release_pages()
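
iopt_alloc_pages() initializes a kref and iopt_release_pages() recovers the containing struct iopt_pages with container_of() once the last reference is dropped. Below is a hedged userspace sketch of that intrusive-refcount pattern; it is a simplified, non-atomic illustration with invented names, not the kernel's kref implementation.

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Minimal stand-in for struct kref (the kernel's counter is atomic). */
	struct kref { unsigned int refcount; };

	struct pages_obj {
		struct kref kref;
		unsigned long npages;
	};

	/* Release callback: recover the container from the embedded kref. */
	static void pages_release(struct kref *kref)
	{
		struct pages_obj *p = container_of(kref, struct pages_obj, kref);

		printf("releasing %lu pages\n", p->npages);
		free(p);
	}

	static void kref_get(struct kref *k) { k->refcount++; }

	static void kref_put(struct kref *k, void (*release)(struct kref *))
	{
		if (--k->refcount == 0)
			release(k);
	}

	int main(void)
	{
		struct pages_obj *p = calloc(1, sizeof(*p));

		p->kref.refcount = 1;			/* kref_init() */
		p->npages = 32;
		kref_get(&p->kref);			/* a second user takes a reference */
		kref_put(&p->kref, pages_release);	/* drops to 1: no release */
		kref_put(&p->kref, pages_release);	/* drops to 0: release runs */
		return 0;
	}
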
1383 struct iopt_pages *pages, struct iommu_domain *domain, in iopt_area_unpin_domain() argument
1413 * contiguous pages. Thus, if we have to stop unpinning in the in iopt_area_unpin_domain()
1433 batch_unpin(batch, pages, 0, in iopt_area_unpin_domain()
1443 struct iopt_pages *pages, in __iopt_area_unfill_domain() argument
1453 lockdep_assert_held(&pages->mutex); in __iopt_area_unfill_domain()
1457 * so this must unmap any IOVA before we go ahead and unpin the pages. in __iopt_area_unfill_domain()
1458 * This creates a complexity where we need to skip over unpinning pages in __iopt_area_unfill_domain()
1470 interval_tree_for_each_double_span(&span, &pages->domains_itree, in __iopt_area_unfill_domain()
1471 &pages->access_itree, start_index, in __iopt_area_unfill_domain()
1478 iopt_area_unpin_domain(&batch, area, pages, domain, in __iopt_area_unfill_domain()
1491 update_unpinned(pages); in __iopt_area_unfill_domain()
1495 struct iopt_pages *pages, in iopt_area_unfill_partial_domain() argument
1500 __iopt_area_unfill_domain(area, pages, domain, end_index - 1); in iopt_area_unfill_partial_domain()
1520 * @pages: page supplier for the area (area->pages is NULL)
1527 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, in iopt_area_unfill_domain() argument
1530 __iopt_area_unfill_domain(area, pages, domain, in iopt_area_unfill_domain()
1548 lockdep_assert_held(&area->pages->mutex); in iopt_area_fill_domain()
1550 rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area), in iopt_area_fill_domain()
1575 iopt_area_unfill_partial_domain(area, area->pages, domain, in iopt_area_fill_domain()
1585 * @pages: The pages associated with the area (area->pages is NULL)
1593 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_fill_domains() argument
1608 mutex_lock(&pages->mutex); in iopt_area_fill_domains()
1609 rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), in iopt_area_fill_domains()
1634 interval_tree_insert(&area->pages_node, &pages->domains_itree); in iopt_area_fill_domains()
1658 iopt_area_unfill_partial_domain(area, pages, domain, in iopt_area_fill_domains()
1665 mutex_unlock(&pages->mutex); in iopt_area_fill_domains()
1672 * @pages: The pages associated with the area (area->pages is NULL)
1677 void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_unfill_domains() argument
1685 mutex_lock(&pages->mutex); in iopt_area_unfill_domains()
1697 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_unfill_domains()
1698 iopt_area_unfill_domain(area, pages, area->storage_domain); in iopt_area_unfill_domains()
1701 mutex_unlock(&pages->mutex); in iopt_area_unfill_domains()
1705 struct iopt_pages *pages, in iopt_pages_unpin_xarray() argument
1710 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, in iopt_pages_unpin_xarray()
1712 batch_unpin(batch, pages, 0, batch->total_pfns); in iopt_pages_unpin_xarray()
1720 * @pages: The pages to act on
1724 * Called when an iopt_pages_access is removed, removes pages from the itree.
1727 void iopt_pages_unfill_xarray(struct iopt_pages *pages, in iopt_pages_unfill_xarray() argument
1736 lockdep_assert_held(&pages->mutex); in iopt_pages_unfill_xarray()
1738 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_unfill_xarray()
1739 &pages->domains_itree, start_index, in iopt_pages_unfill_xarray()
1748 iopt_pages_unpin_xarray(&batch, pages, span.start_hole, in iopt_pages_unfill_xarray()
1752 clear_xarray(&pages->pinned_pfns, span.start_used, in iopt_pages_unfill_xarray()
1759 update_unpinned(pages); in iopt_pages_unfill_xarray()
1764 * @pages: The pages to act on
1767 * @out_pages: The output array to return the pages
1771 * the pages directly from the xarray.
1773 * This is part of the SW iommu interface to read pages for in-kernel use.
1775 void iopt_pages_fill_from_xarray(struct iopt_pages *pages, in iopt_pages_fill_from_xarray() argument
1780 XA_STATE(xas, &pages->pinned_pfns, start_index); in iopt_pages_fill_from_xarray()
1795 static int iopt_pages_fill_from_domain(struct iopt_pages *pages, in iopt_pages_fill_from_domain() argument
1804 area = iopt_pages_find_domain_area(pages, start_index); in iopt_pages_fill_from_domain()
1817 static int iopt_pages_fill(struct iopt_pages *pages, in iopt_pages_fill() argument
1828 rc = pfn_reader_user_pin(user, pages, cur_index, last_index); in iopt_pages_fill()
1837 iopt_pages_err_unpin(pages, start_index, cur_index - 1, in iopt_pages_fill()
1844 * @pages: The pages to act on
1847 * @out_pages: The output array to return the pages, may be NULL
1849 * This populates the xarray and returns the pages in out_pages. As the slow
1850 * path this is able to copy pages from other storage tiers into the xarray.
1854 * This is part of the SW iommu interface to read pages for in-kernel use.
1856 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, in iopt_pages_fill_xarray() argument
1864 lockdep_assert_held(&pages->mutex); in iopt_pages_fill_xarray()
1866 pfn_reader_user_init(&user, pages); in iopt_pages_fill_xarray()
1868 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_fill_xarray()
1869 &pages->domains_itree, start_index, in iopt_pages_fill_xarray()
1875 iopt_pages_fill_from_xarray(pages, span.start_used, in iopt_pages_fill_xarray()
1882 iopt_pages_fill_from_domain(pages, span.start_used, in iopt_pages_fill_xarray()
1884 rc = pages_to_xarray(&pages->pinned_pfns, in iopt_pages_fill_xarray()
1895 rc = iopt_pages_fill(pages, &user, span.start_hole, in iopt_pages_fill_xarray()
1899 rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, in iopt_pages_fill_xarray()
1902 iopt_pages_err_unpin(pages, span.start_hole, in iopt_pages_fill_xarray()
1908 rc = pfn_reader_user_update_pinned(&user, pages); in iopt_pages_fill_xarray()
1912 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
1917 iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); in iopt_pages_fill_xarray()
1919 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
1928 static int iopt_pages_rw_slow(struct iopt_pages *pages, in iopt_pages_rw_slow() argument
1937 mutex_lock(&pages->mutex); in iopt_pages_rw_slow()
1939 rc = pfn_reader_first(&pfns, pages, start_index, last_index); in iopt_pages_rw_slow()
1961 mutex_unlock(&pages->mutex); in iopt_pages_rw_slow()
1969 static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, in iopt_pages_rw_page() argument
1977 WARN_ON(pages->type != IOPT_ADDRESS_USER)) in iopt_pages_rw_page()
1980 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_page()
1981 return iopt_pages_rw_slow(pages, index, index, offset, data, in iopt_pages_rw_page()
1989 mmap_read_lock(pages->source_mm); in iopt_pages_rw_page()
1991 pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), in iopt_pages_rw_page()
1994 mmap_read_unlock(pages->source_mm); in iopt_pages_rw_page()
2005 mmput(pages->source_mm); in iopt_pages_rw_page()
2010 * iopt_pages_rw_access - Copy to/from a linear slice of the pages
2011 * @pages: pages to act on
2012 * @start_byte: First byte of pages to copy to/from
2020 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, in iopt_pages_rw_access() argument
2025 bool change_mm = current->mm != pages->source_mm; in iopt_pages_rw_access()
2032 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_pages_rw_access()
2035 if (pages->type == IOPT_ADDRESS_FILE) in iopt_pages_rw_access()
2036 return iopt_pages_rw_slow(pages, start_index, last_index, in iopt_pages_rw_access()
2041 WARN_ON(pages->type != IOPT_ADDRESS_USER)) in iopt_pages_rw_access()
2046 return iopt_pages_rw_page(pages, start_index, in iopt_pages_rw_access()
2049 return iopt_pages_rw_slow(pages, start_index, last_index, in iopt_pages_rw_access()
2059 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_access()
2060 return iopt_pages_rw_slow(pages, start_index, in iopt_pages_rw_access()
2064 kthread_use_mm(pages->source_mm); in iopt_pages_rw_access()
2068 if (copy_to_user(pages->uptr + start_byte, data, length)) in iopt_pages_rw_access()
2071 if (copy_from_user(data, pages->uptr + start_byte, length)) in iopt_pages_rw_access()
2076 kthread_unuse_mm(pages->source_mm); in iopt_pages_rw_access()
2077 mmput(pages->source_mm); in iopt_pages_rw_access()
2084 iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, in iopt_pages_get_exact_access() argument
2089 lockdep_assert_held(&pages->mutex); in iopt_pages_get_exact_access()
2092 for (node = interval_tree_iter_first(&pages->access_itree, index, last); in iopt_pages_get_exact_access()
2108 * Record that an in-kernel access will be accessing the pages, ensure they are
2117 struct iopt_pages *pages = area->pages; in iopt_area_add_access() local
2121 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_area_add_access()
2124 mutex_lock(&pages->mutex); in iopt_area_add_access()
2125 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_add_access()
2129 iopt_pages_fill_from_xarray(pages, start_index, last_index, in iopt_area_add_access()
2131 mutex_unlock(&pages->mutex); in iopt_area_add_access()
2141 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); in iopt_area_add_access()
2149 interval_tree_insert(&access->node, &pages->access_itree); in iopt_area_add_access()
2150 mutex_unlock(&pages->mutex); in iopt_area_add_access()
2156 mutex_unlock(&pages->mutex); in iopt_area_add_access()
2166 * Undo iopt_area_add_access() and unpin the pages if necessary. The caller
2172 struct iopt_pages *pages = area->pages; in iopt_area_remove_access() local
2175 mutex_lock(&pages->mutex); in iopt_area_remove_access()
2176 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_remove_access()
2186 interval_tree_remove(&access->node, &pages->access_itree); in iopt_area_remove_access()
2187 iopt_pages_unfill_xarray(pages, start_index, last_index); in iopt_area_remove_access()
2190 mutex_unlock(&pages->mutex); in iopt_area_remove_access()