Lines Matching +full:page +full:- +full:level

1 // SPDX-License-Identifier: GPL-2.0-only
3 * CPU-agnostic AMD IO page table v2 allocator.
10 #define pr_fmt(fmt) "AMD-Vi: " fmt
14 #include <linux/io-pgtable.h>
21 #include "../iommu-pages.h"
26 #define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
27 #define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
30 #define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
49 static inline u64 set_pgtable_attr(u64 *page) in set_pgtable_attr() argument
56 return (iommu_virt_to_phys(page) | prot); in set_pgtable_attr()
75 /* Large page */ in set_pte_attr()
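A note on the two attribute helpers whose tails appear above: lower-level table pointers and leaf PTEs are built by OR-ing a physical address with permission bits, with PSE marking a large leaf. A minimal userspace sketch of that encoding follows; only PWT=3, PCD=4 and PSE=7 are confirmed by the defines in the listing, so the PRESENT/RW/USER positions here are assumptions in the style of x86 long-mode paging, not the file's definitions.

#include <stdint.h>
#include <stdio.h>

/* Bit positions: PSE is bit 7 per the listing; the rest are assumed. */
#define PT_PRESENT  (1ULL << 0)
#define PT_RW       (1ULL << 1)
#define PT_USER     (1ULL << 2)
#define PT_PSE      (1ULL << 7)   /* large-page marker */

/* Next-level table pointer: table address plus access bits. */
static uint64_t make_table_entry(uint64_t table_phys)
{
        return table_phys | PT_PRESENT | PT_RW | PT_USER;
}

/* Leaf PTE: page address plus access bits, PSE set for 2M/1G mappings. */
static uint64_t make_leaf_entry(uint64_t page_phys, int large)
{
        uint64_t pte = page_phys | PT_PRESENT | PT_RW | PT_USER;

        if (large)
                pte |= PT_PSE;
        return pte;
}

int main(void)
{
        printf("table entry: %#llx\n",
               (unsigned long long)make_table_entry(0x12345000ULL));
        printf("2M leaf:     %#llx\n",
               (unsigned long long)make_leaf_entry(0x40000000ULL, 1));
        return 0;
}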
103 static void free_pgtable(u64 *pt, int level) in free_pgtable() argument
117 * Free the next level. No need to look at l1 tables here since in free_pgtable()
121 if (level > 2) in free_pgtable()
122 free_pgtable(p, level - 1); in free_pgtable()
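The free_pgtable() lines above show the recursion stopping condition: descend only while level > 2, because level-1 tables can hold nothing but leaf PTEs. A sketch of that teardown, using malloc'd arrays in place of IOMMU pages (the kernel also skips large/leaf entries, which this model leaves out):

#include <stdlib.h>

#define ENTRIES 512

struct table {
        struct table *child[ENTRIES];   /* NULL means "not present" */
};

/* Recursively free the tree rooted at @pt; @level counts remaining paging
 * levels.  Level-1 tables are freed directly, without descending. */
static void free_table(struct table *pt, int level)
{
        for (int i = 0; i < ENTRIES; i++) {
                if (!pt->child[i])
                        continue;
                if (level > 2)
                        free_table(pt->child[i], level - 1);
                else
                        free(pt->child[i]);
        }
        free(pt);
}

int main(void)
{
        struct table *pgd = calloc(1, sizeof(*pgd));
        struct table *l3  = calloc(1, sizeof(*l3));
        struct table *l2  = calloc(1, sizeof(*l2));

        if (!pgd || !l3 || !l2)
                return 1;
        pgd->child[0] = l3;
        l3->child[7]  = l2;
        free_table(pgd, 4);     /* same shape as free_pgtable(pgd, get_pgtable_level()) */
        return 0;
}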
130 /* Allocate page table */
134 u64 *pte, *page; in v2_alloc_pte() local
135 int level, end_level; in v2_alloc_pte() local
137 level = get_pgtable_level() - 1; in v2_alloc_pte()
139 pte = &pgd[PM_LEVEL_INDEX(level, iova)]; in v2_alloc_pte()
142 while (level >= end_level) { in v2_alloc_pte()
155 page = iommu_alloc_page_node(nid, gfp); in v2_alloc_pte()
156 if (!page) in v2_alloc_pte()
159 __npte = set_pgtable_attr(page); in v2_alloc_pte()
162 iommu_free_page(page); in v2_alloc_pte()
169 level -= 1; in v2_alloc_pte()
171 pte = &pte[PM_LEVEL_INDEX(level, iova)]; in v2_alloc_pte()
182 free_pgtable(__pte, end_level - 1); in v2_alloc_pte()
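The v2_alloc_pte() lines above walk one 512-entry table per level, allocating missing tables along the way. A single-threaded userspace sketch of that descent; the 12 + 9*level index shift is an assumption matching PM_LEVEL_INDEX() with 4 KiB base pages, and the kernel installs new tables with cmpxchg64, freeing the page again (the iommu_free_page() line above) when another CPU won the race:

#include <stdint.h>
#include <stdlib.h>

#define ENTRIES         512
#define ENT_PRESENT     1ULL
/* 9-bit index for one walk level. */
#define LEVEL_INDEX(level, iova) \
        ((size_t)(((iova) >> (12 + 9 * (level))) & 0x1ff))

struct table { uint64_t entry[ENTRIES]; };

static struct table *entry_to_table(uint64_t e)
{
        return (struct table *)(uintptr_t)(e & ~ENT_PRESENT);
}

/* Walk from @top_level down to @end_level for @iova, allocating missing
 * intermediate tables, and return the PTE slot to write the mapping into. */
static uint64_t *walk_alloc(struct table *pgd, uint64_t iova,
                            int top_level, int end_level)
{
        uint64_t *pte = &pgd->entry[LEVEL_INDEX(top_level, iova)];

        for (int level = top_level; level > end_level; level--) {
                struct table *next;

                if (!(*pte & ENT_PRESENT)) {
                        next = calloc(1, sizeof(*next));
                        if (!next)
                                return NULL;
                        *pte = (uint64_t)(uintptr_t)next | ENT_PRESENT;
                }
                next = entry_to_table(*pte);
                pte = &next->entry[LEVEL_INDEX(level - 1, iova)];
        }
        return pte;
}

In this model, walk_alloc(pgd, iova, 3, 0) returns the 4 KiB PTE slot under 4-level paging, while end_level 1 stops at the slot a 2 MiB leaf would occupy.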
198 int level; in fetch_pte() local
200 level = get_pgtable_level() - 1; in fetch_pte()
201 pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)]; in fetch_pte()
202 /* Default page size is 4K */ in fetch_pte()
205 while (level) { in fetch_pte()
210 /* Walk to the next level */ in fetch_pte()
212 pte = &pte[PM_LEVEL_INDEX(level - 1, iova)]; in fetch_pte()
214 /* Large page */ in fetch_pte()
216 if (level == PAGE_MODE_3_LEVEL) in fetch_pte()
218 else if (level == PAGE_MODE_2_LEVEL) in fetch_pte()
226 level -= 1; in fetch_pte()
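The fetch_pte() lines above derive the page size from the walk level at which a PSE leaf is found. A tiny sketch of that mapping (PAGE_MODE_3_LEVEL and PAGE_MODE_2_LEVEL are taken to be levels 3 and 2, which is an assumption here):

#include <stdio.h>

#define SZ_4K   (1UL << 12)
#define SZ_2M   (1UL << 21)
#define SZ_1G   (1UL << 30)

/* Region size mapped by a PSE (large) leaf found at walk level @level:
 * level 3 -> 1 GiB, level 2 -> 2 MiB; any other level with PSE set is
 * malformed.  A walk that never sees PSE ends at the 4 KiB default. */
static unsigned long pse_level_to_size(int level)
{
        if (level == 3)
                return SZ_1G;
        if (level == 2)
                return SZ_2M;
        return 0;       /* wrongly set PSE bit */
}

int main(void)
{
        printf("PSE at level 3: %lu bytes\n", pse_level_to_size(3));
        printf("PSE at level 2: %lu bytes\n", pse_level_to_size(2));
        printf("no PSE seen:    %lu bytes\n", SZ_4K);
        return 0;
}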
237 struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; in iommu_v2_map_pages()
246 if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount) in iommu_v2_map_pages()
247 return -EINVAL; in iommu_v2_map_pages()
250 return -EINVAL; in iommu_v2_map_pages()
254 pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd, in iommu_v2_map_pages()
257 ret = -EINVAL; in iommu_v2_map_pages()
273 spin_lock_irqsave(&pdom->lock, flags); in iommu_v2_map_pages()
275 spin_unlock_irqrestore(&pdom->lock, flags); in iommu_v2_map_pages()
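The iommu_v2_map_pages() lines above validate pgsize against the bitmap, walk/allocate a PTE per page, and finish by flushing domain TLB ranges under the domain lock. A schematic of the loop shape only; lookup_pte_stub() and leaf_stub() are illustration names, not the kernel's helpers:

#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the table walk and PTE encoding. */
static uint64_t pte_store[512];

static uint64_t *lookup_pte_stub(uint64_t iova)
{
        return &pte_store[(iova >> 12) % 512];
}

static uint64_t leaf_stub(uint64_t paddr)
{
        return paddr | 1;       /* "present" */
}

/* One leaf PTE per page of @pgsize, IOVA and physical address advancing in
 * lock step; bytes mapped so far are reported back so a caller can unwind
 * on error. */
static int map_pages_model(uint64_t iova, uint64_t paddr, size_t pgsize,
                           size_t pgcount, size_t *mapped)
{
        *mapped = 0;

        while (pgcount--) {
                uint64_t *pte = lookup_pte_stub(iova);

                if (!pte)
                        return -1;      /* walk/allocation failure */

                *pte = leaf_stub(paddr);
                iova += pgsize;
                paddr += pgsize;
                *mapped += pgsize;
        }
        return 0;
}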
290 struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; in iommu_v2_unmap_pages()
296 if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount)) in iommu_v2_unmap_pages()
306 iova = (iova & ~(unmap_size - 1)) + unmap_size; in iommu_v2_unmap_pages()
323 offset_mask = pte_pgsize - 1; in iommu_v2_iova_to_phys()
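The offset_mask line above is the whole trick in iova_to_phys(): since pte_pgsize is a power of two, pte_pgsize - 1 masks out the in-page offset, which is then taken from the IOVA while the frame comes from the PTE. A worked sketch; the address mask is an assumption in the spirit of PM_ADDR_MASK:

#include <stdint.h>
#include <stdio.h>

static uint64_t iova_to_phys_model(uint64_t pte, uint64_t iova,
                                   uint64_t pte_pgsize)
{
        uint64_t offset_mask = pte_pgsize - 1;
        uint64_t addr_mask = 0x000ffffffffff000ULL;     /* assumed */

        return (pte & addr_mask & ~offset_mask) | (iova & offset_mask);
}

int main(void)
{
        /* 2 MiB leaf at physical 0x40000000; offset inside it is 0x12345 */
        uint64_t pte  = 0x40000000ULL | 0x83;
        uint64_t iova = 0x200000ULL + 0x12345ULL;

        printf("%#llx\n",       /* prints 0x40012345 */
               (unsigned long long)iova_to_phys_model(pte, iova, 1ULL << 21));
        return 0;
}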
336 if (!pgtable || !pgtable->pgd) in v2_free_pgtable()
339 /* Free page table */ in v2_free_pgtable()
340 free_pgtable(pgtable->pgd, get_pgtable_level()); in v2_free_pgtable()
341 pgtable->pgd = NULL; in v2_free_pgtable()
349 pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL); in v2_alloc_pgtable()
350 if (!pgtable->pgd) in v2_alloc_pgtable()
356 pgtable->pgtbl.ops.map_pages = iommu_v2_map_pages; in v2_alloc_pgtable()
357 pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages; in v2_alloc_pgtable()
358 pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys; in v2_alloc_pgtable()
360 cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; in v2_alloc_pgtable()
361 cfg->ias = ias; in v2_alloc_pgtable()
362 cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE; in v2_alloc_pgtable()
364 return &pgtable->pgtbl; in v2_alloc_pgtable()
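The v2_alloc_pgtable() lines above allocate the root table on the right NUMA node, wire map_pages/unmap_pages/iova_to_phys into the returned io_pgtable, and fill in pgsize_bitmap, ias and oas. A generic model of that construction pattern; the struct and callback signatures are simplified illustrations, not the io-pgtable API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative op table mirroring map_pages/unmap_pages/iova_to_phys. */
struct pgtbl_ops {
        int      (*map_pages)(void *cookie, uint64_t iova, uint64_t paddr,
                              size_t pgsize, size_t pgcount);
        size_t   (*unmap_pages)(void *cookie, uint64_t iova,
                                size_t pgsize, size_t pgcount);
        uint64_t (*iova_to_phys)(void *cookie, uint64_t iova);
};

struct pgtbl {
        uint64_t *pgd;                  /* top-level table, 512 entries */
        struct pgtbl_ops ops;
};

/* Allocate the root table and publish the callbacks, the same shape as
 * v2_alloc_pgtable() before it sets pgsize_bitmap/ias/oas in the cfg. */
static struct pgtbl *alloc_pgtable(const struct pgtbl_ops *ops)
{
        struct pgtbl *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;
        p->pgd = calloc(512, sizeof(*p->pgd));
        if (!p->pgd) {
                free(p);
                return NULL;
        }
        memcpy(&p->ops, ops, sizeof(p->ops));
        return p;
}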