Lines Matching full:pages
24 struct iopt_pages *pages; member
43 if (!iter->area->pages) { in iopt_area_contig_init()
66 !iter->area->pages) { in iopt_area_contig_next()
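
The two hits at lines 43 and 66 are the contiguous-IOVA iterator refusing to start or to continue across an area whose pages pointer is still NULL (reserved but never populated). A minimal userspace C sketch of that guard, using hypothetical simplified types rather than the kernel's structures, is shown below:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for the kernel's iopt structures. */
struct pages { int dummy; };
struct area {
	struct area *next;           /* next area in iova order */
	struct pages *pages;         /* NULL: reserved but not populated */
	unsigned long iova, last_iova;
};
struct contig_iter { struct area *area; };

/* Begin a contiguity walk; an unpopulated area ends it immediately. */
static bool contig_init(struct contig_iter *iter, struct area *first)
{
	iter->area = first;
	return first && first->pages;      /* mirrors the !area->pages checks */
}

/* Advance only while the next area is adjacent and has pages. */
static bool contig_next(struct contig_iter *iter)
{
	struct area *next = iter->area->next;

	if (!next || next->iova != iter->area->last_iova + 1 || !next->pages)
		return false;
	iter->area = next;
	return true;
}
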
196 * The area takes a slice of the pages from start_byte to start_byte + length in iopt_insert_area()
199 struct iopt_pages *pages, unsigned long iova, in iopt_insert_area() argument
205 if ((iommu_prot & IOMMU_WRITE) && !pages->writable) in iopt_insert_area()
221 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
225 * The area is inserted with a NULL pages indicating it is not fully in iopt_insert_area()
271 switch (elm->pages->type) { in iopt_alloc_area_pages()
273 start = elm->start_byte + (uintptr_t)elm->pages->uptr; in iopt_alloc_area_pages()
276 start = elm->start_byte + elm->pages->start; in iopt_alloc_area_pages()
294 * Areas are created with a NULL pages so that the IOVA space is in iopt_alloc_area_pages()
299 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
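
The iopt_insert_area() and iopt_alloc_area_pages() hits (lines 196–299) show a two-step construction: the IOVA range is claimed while the area's pages pointer is still NULL (and a writable mapping of read-only pages is rejected up front), and the area is only populated later once the rest of the setup succeeds. A hedged userspace sketch of that "reserve first, publish later" pattern, with hypothetical simplified types that are not the kernel's, might be:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins; the real code uses rb-trees and interval trees. */
struct pages { bool writable; };
struct area {
	struct pages *pages;        /* stays NULL until the map succeeds */
	unsigned long iova, length;
	bool reserved;
};

/* Step 1: claim the IOVA range; the area is visible but not yet usable. */
static int insert_area(struct area *area, struct pages *pages,
		       unsigned long iova, unsigned long length, bool write)
{
	if (write && !pages->writable)
		return -EPERM;          /* mirrors the IOMMU_WRITE check */
	area->iova = iova;
	area->length = length;
	area->pages = NULL;             /* not populated yet */
	area->reserved = true;
	return 0;
}

/* Step 2, taken only after the domains are filled: publish the pages. */
static void publish_area(struct area *area, struct pages *pages)
{
	area->pages = pages;
}

Keeping pages NULL during the first step means any failure before publication can simply abort the reservation without tearing down mappings.
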
314 WARN_ON(area->pages); in iopt_abort_area()
331 if (elm->pages) in iopt_free_pages_list()
332 iopt_put_pages(elm->pages); in iopt_free_pages_list()
345 rc = iopt_area_fill_domains(elm->area, elm->pages); in iopt_fill_domains_pages()
355 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); in iopt_fill_domains_pages()
380 * area->pages must be set inside the domains_rwsem to ensure in iopt_map_pages()
384 elm->area->pages = elm->pages; in iopt_map_pages()
385 elm->pages = NULL; in iopt_map_pages()
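
The iopt_map_pages() hits (lines 380–385) show the pages pointer being handed from the mapping element into the area only while domains_rwsem is held, so code that walks areas under that lock never observes a half-built mapping, and the element gives up its reference at the same time. A small pthread-based sketch of that "publish under the lock, consume the element's reference" idea (a simplification, not the kernel's actual locking scheme) could be:

#include <pthread.h>
#include <stddef.h>

struct pages { int dummy; };
struct area { struct pages *pages; };
struct elm { struct area *area; struct pages *pages; };

static pthread_rwlock_t domains_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Publish: move ownership of elm->pages into the area under the lock. */
static void publish_pages(struct elm *elm)
{
	pthread_rwlock_wrlock(&domains_rwsem);
	elm->area->pages = elm->pages;   /* area->pages set inside the lock */
	elm->pages = NULL;               /* elm no longer owns the reference */
	pthread_rwlock_unlock(&domains_rwsem);
}

/* Readers hold the lock shared before dereferencing area->pages. */
static struct pages *read_pages(struct area *area)
{
	struct pages *p;

	pthread_rwlock_rdlock(&domains_rwsem);
	p = area->pages;
	pthread_rwlock_unlock(&domains_rwsem);
	return p;
}
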
395 struct iopt_pages *pages, unsigned long *iova, in iopt_map_common() argument
403 elm.pages = pages; in iopt_map_common()
406 elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER) in iopt_map_common()
407 elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM; in iopt_map_common()
415 if (elm.pages) in iopt_map_common()
416 iopt_put_pages(elm.pages); in iopt_map_common()
434 * page tables this will pin the pages and load them into the domain at iova.
446 struct iopt_pages *pages; in iopt_map_user_pages() local
448 pages = iopt_alloc_user_pages(uptr, length, iommu_prot & IOMMU_WRITE); in iopt_map_user_pages()
449 if (IS_ERR(pages)) in iopt_map_user_pages()
450 return PTR_ERR(pages); in iopt_map_user_pages()
452 return iopt_map_common(ictx, iopt, pages, iova, length, in iopt_map_user_pages()
453 uptr - pages->uptr, iommu_prot, flags); in iopt_map_user_pages()
473 struct iopt_pages *pages; in iopt_map_file_pages() local
475 pages = iopt_alloc_file_pages(file, start, length, in iopt_map_file_pages()
477 if (IS_ERR(pages)) in iopt_map_file_pages()
478 return PTR_ERR(pages); in iopt_map_file_pages()
479 return iopt_map_common(ictx, iopt, pages, iova, length, in iopt_map_file_pages()
480 start - pages->start, iommu_prot, flags); in iopt_map_file_pages()
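
iopt_map_user_pages() and iopt_map_file_pages() (lines 434–480) follow the same shape: allocate an iopt_pages for the backing store, bail out on IS_ERR(), then pass it to iopt_map_common() together with the byte offset of the requested start inside that pages object; on failure iopt_map_common() drops the reference itself (lines 415–416). A rough userspace approximation of that calling pattern, with invented helper names and none of the kernel's error-pointer machinery, might look like:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical simplified types and helpers (not the kernel's API). */
struct pages { uintptr_t uptr; };

static struct pages *alloc_user_pages(void *uptr, size_t length)
{
	struct pages *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	/* The real code tracks a page-aligned base; keep only the rounding. */
	p->uptr = (uintptr_t)uptr & ~((uintptr_t)4095);
	(void)length;
	return p;
}

static int map_common(struct pages *pages, unsigned long *iova,
		      size_t length, size_t start_byte)
{
	/* ... reserve IOVA, fill domains, publish pages, put on failure ... */
	(void)pages; (void)iova; (void)length; (void)start_byte;
	return 0;
}

static int map_user_pages(void *uptr, size_t length, unsigned long *iova)
{
	struct pages *pages = alloc_user_pages(uptr, length);

	if (!pages)
		return -ENOMEM;
	/* start_byte: offset of the request inside the aligned pages object. */
	return map_common(pages, iova, length,
			  (uintptr_t)uptr - pages->uptr);
}
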
615 if (!area->pages) in iopt_clear_dirty_data()
678 elm->pages = area->pages; in iopt_get_pages()
680 kref_get(&elm->pages->kref); in iopt_get_pages()
704 * The domains_rwsem must be held in read mode any time any area->pages in iopt_unmap_iova_range()
714 struct iopt_pages *pages; in iopt_unmap_iova_range() local
717 if (!area->pages) { in iopt_unmap_iova_range()
733 * without the pages->mutex. in iopt_unmap_iova_range()
751 pages = area->pages; in iopt_unmap_iova_range()
752 area->pages = NULL; in iopt_unmap_iova_range()
755 iopt_area_unfill_domains(area, pages); in iopt_unmap_iova_range()
757 iopt_put_pages(pages); in iopt_unmap_iova_range()
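
The iopt_unmap_iova_range() hits (lines 704–757) show the teardown ordering: save and clear area->pages first, undo the domain mappings using the saved pointer, and only then drop the reference. A small refcount-based sketch of that "detach, undo, put" sequence, again with hypothetical simplified types, is:

#include <stdlib.h>

/* Hypothetical simplified refcounted pages object. */
struct pages { int refcount; };
struct area { struct pages *pages; };

static void put_pages(struct pages *pages)
{
	if (--pages->refcount == 0)
		free(pages);
}

static void unfill_domains(struct area *area, struct pages *pages)
{
	/* ... unmap the pages from every attached domain ... */
	(void)area; (void)pages;
}

/* Teardown: detach first, then undo the domain mappings, then release. */
static void unmap_area(struct area *area)
{
	struct pages *pages = area->pages;

	if (!pages)
		return;            /* never populated, nothing to undo */
	area->pages = NULL;        /* detach so no new users appear */
	unfill_domains(area, pages);
	put_pages(pages);          /* drop the area's reference last */
}

Releasing the reference last keeps the pages object alive for the whole unfill, even if another holder drops its reference concurrently.
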
942 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
944 if (!pages) in iopt_unfill_domain()
947 mutex_lock(&pages->mutex); in iopt_unfill_domain()
952 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
961 struct iopt_pages *pages = area->pages; in iopt_unfill_domain() local
963 if (!pages) in iopt_unfill_domain()
966 mutex_lock(&pages->mutex); in iopt_unfill_domain()
967 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_unfill_domain()
970 iopt_area_unfill_domain(area, pages, domain); in iopt_unfill_domain()
971 mutex_unlock(&pages->mutex); in iopt_unfill_domain()
995 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
997 if (!pages) in iopt_fill_domain()
1000 mutex_lock(&pages->mutex); in iopt_fill_domain()
1003 mutex_unlock(&pages->mutex); in iopt_fill_domain()
1010 &pages->domains_itree); in iopt_fill_domain()
1012 mutex_unlock(&pages->mutex); in iopt_fill_domain()
1020 struct iopt_pages *pages = area->pages; in iopt_fill_domain() local
1024 if (!pages) in iopt_fill_domain()
1026 mutex_lock(&pages->mutex); in iopt_fill_domain()
1029 &pages->domains_itree); in iopt_fill_domain()
1032 iopt_area_unfill_domain(area, pages, domain); in iopt_fill_domain()
1033 mutex_unlock(&pages->mutex); in iopt_fill_domain()
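
iopt_unfill_domain() and iopt_fill_domain() (lines 942–1033) take pages->mutex around every change to the domains_itree, skip areas with no pages, and unwind with iopt_area_unfill_domain() when a later step fails. A compact sketch of that lock-plus-unwind shape, where a plain linked list stands in for the interval tree and the types are hypothetical, might be:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical simplified types; a linked list stands in for domains_itree. */
struct area;
struct pages {
	pthread_mutex_t mutex;
	struct area *tracked;        /* areas currently mapped in a domain */
};
struct area {
	struct pages *pages;
	struct area *next_tracked;
};

static int map_into_domain(struct area *area) { (void)area; return 0; }
static void unmap_from_domain(struct area *area) { (void)area; }

/* Map one area into a domain; every tracking change happens under the mutex. */
static int fill_domain(struct area *area)
{
	struct pages *pages = area->pages;
	int rc;

	if (!pages)
		return 0;               /* unpopulated areas are skipped */

	pthread_mutex_lock(&pages->mutex);
	rc = map_into_domain(area);
	if (!rc) {
		area->next_tracked = pages->tracked;
		pages->tracked = area;  /* "interval_tree_insert" stand-in */
	}
	pthread_mutex_unlock(&pages->mutex);
	return rc;
}

/* Undo path: untrack the area, then tear down its domain mapping. */
static void unfill_domain(struct area *area)
{
	struct pages *pages = area->pages;
	struct area **pp;

	if (!pages)
		return;
	pthread_mutex_lock(&pages->mutex);
	for (pp = &pages->tracked; *pp; pp = &(*pp)->next_tracked) {
		if (*pp == area) {
			*pp = area->next_tracked;   /* "interval_tree_remove" */
			break;
		}
	}
	unmap_from_domain(area);
	pthread_mutex_unlock(&pages->mutex);
}
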
1230 struct iopt_pages *pages = area->pages; in iopt_area_split() local
1240 if (!pages || area->prevent_access) in iopt_area_split()
1257 mutex_lock(&pages->mutex); in iopt_area_split()
1269 * huge pages. in iopt_area_split()
1277 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, in iopt_area_split()
1284 rc = iopt_insert_area(iopt, rhs, area->pages, new_start, in iopt_area_split()
1295 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_split()
1296 interval_tree_insert(&lhs->pages_node, &pages->domains_itree); in iopt_area_split()
1297 interval_tree_insert(&rhs->pages_node, &pages->domains_itree); in iopt_area_split()
1301 lhs->pages = area->pages; in iopt_area_split()
1303 rhs->pages = area->pages; in iopt_area_split()
1304 kref_get(&rhs->pages->kref); in iopt_area_split()
1306 mutex_unlock(&pages->mutex); in iopt_area_split()
1309 * No change to domains or accesses because the pages hasn't been in iopt_area_split()
1319 mutex_unlock(&pages->mutex); in iopt_area_split()
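
The iopt_area_split() hits (lines 1230–1319) show a split that leaves both halves pointing at the same struct iopt_pages: the old node comes out of the domains_itree, two new nodes go in, the left half reuses the existing reference and the right half takes an extra one with kref_get(), all under pages->mutex. A short userspace sketch of splitting a range while sharing a refcounted backing object (hypothetical, simplified types) could be:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical refcounted backing object shared by both halves. */
struct pages { int refcount; };
struct area {
	struct pages *pages;
	unsigned long iova, last_iova;   /* inclusive range */
};

static void get_pages(struct pages *p) { p->refcount++; }

/* Split @area at @new_start; on success *@rhs_out holds the upper half. */
static int split_area(struct area *area, unsigned long new_start,
		      struct area **rhs_out)
{
	struct area *rhs;

	if (!area->pages || new_start <= area->iova ||
	    new_start > area->last_iova)
		return -EINVAL;

	rhs = calloc(1, sizeof(*rhs));
	if (!rhs)
		return -ENOMEM;

	rhs->iova = new_start;
	rhs->last_iova = area->last_iova;
	rhs->pages = area->pages;
	get_pages(rhs->pages);           /* second reference for the rhs */

	area->last_iova = new_start - 1; /* the original becomes the lhs */
	*rhs_out = rhs;
	return 0;
}

Because both halves share one pages object, only the reference count changes; nothing backing the mapping is copied or remapped by the split itself.
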
1369 /* Won't do it if domains already have pages mapped in them */ in iopt_disable_large_pages()