Lines Matching +full:page +full:- +full:level
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
17 #include <linux/dma-mapping.h>
23 #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
26 * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
27 * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
28 * ensures that the selected host page size corresponds to a valid device page
56 # error Unsupported device page size PVR_DEVICE_PAGE_SIZE
61 (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
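A worked example of the two page-size helpers above (illustrative, not part of the file): when the host page size, and therefore the device page size, is 4KiB,

/*
 * PVR_MASK_FROM_SIZE(SZ_4K) == ~(0x1000 - 1) == 0xFFFFFFFFFFFFF000ULL,
 * i.e. the mask that strips the in-page offset from a 64-bit address,
 * and PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K) evaluates to 0,
 * since a 4KiB device page and SZ_4K share the same shift of 12.
 */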
64 PVR_MMU_SYNC_LEVEL_NONE = -1,
77 * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to
82 * This function must be called following any possible change to the MMU page
87 atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags); in pvr_mmu_set_flush_flags()
91 * pvr_mmu_flush_request_all() - Request flush of all MMU caches when
95 * This function must be called following any possible change to the MMU page
104 * pvr_mmu_flush_exec() - Execute a flush of all MMU caches previously
120 * * -%EIO if the device is unavailable, or
133 return -EIO; in pvr_mmu_flush_exec()
136 if (!pvr_dev->fw_dev.booted) in pvr_mmu_flush_exec()
139 cmd_mmu_cache_data->cache_flags = in pvr_mmu_flush_exec()
140 atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0); in pvr_mmu_flush_exec()
142 if (!cmd_mmu_cache_data->cache_flags) in pvr_mmu_flush_exec()
147 pvr_fw_object_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj, in pvr_mmu_flush_exec()
148 &cmd_mmu_cache_data->mmu_cache_sync_fw_addr); in pvr_mmu_flush_exec()
149 cmd_mmu_cache_data->mmu_cache_sync_update_value = 0; in pvr_mmu_flush_exec()
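A hedged usage sketch (not taken from the file) of how the flush helpers above combine after a page table update; the pvr_mmu_flush_exec() call matches the one visible later in this listing, while the flush_request_all() parameter list is inferred:

	/* Request a flush of all MMU caches, then execute it, waiting for
	 * the firmware to acknowledge; -EIO means the device is unavailable. */
	pvr_mmu_flush_request_all(pvr_dev);
	err = pvr_mmu_flush_exec(pvr_dev, true);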
199 * Default value for a u16-based index.
203 #define PVR_IDX_INVALID ((u16)(-1))
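Worked value (illustrative): ((u16)(-1)) == 0xFFFF, which is out of range for any real page table entry index, so code such as

	if (table->parent_idx == PVR_IDX_INVALID)
		/* @table is not currently linked into the page table tree */;

can use it as a sentinel (see the pvr_page_table_l*_init() and _remove() helpers below).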
213 * Page size of a PowerVR device's integrated MMU. The CPU page size must be
215 * checked at compile-time.
221 * struct pvr_mmu_backing_page - Represents a single page used to back a page
222 * table of any level.
223 * @dma_addr: DMA address of this page.
224 * @host_ptr: CPU address of this page.
225 * @pvr_dev: The PowerVR device with which this page is associated. **For
232 struct page *raw_page;
237 * pvr_mmu_backing_page_init() - Initialize an MMU backing page.
238 * @page: Target backing page.
243 * 1. Allocate a single page,
244 * 2. Map the page to the CPU, and
245 * 3. Map the page to DMA-space.
247 * It is expected that @page be zeroed (e.g. from kzalloc()) before calling
252 * * -%ENOMEM if allocation of the backing page or mapping of the backing
253 * page to DMA fails.
256 pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page, in pvr_mmu_backing_page_init() argument
259 struct device *dev = from_pvr_device(pvr_dev)->dev; in pvr_mmu_backing_page_init()
261 struct page *raw_page; in pvr_mmu_backing_page_init()
269 return -ENOMEM; in pvr_mmu_backing_page_init()
273 err = -ENOMEM; in pvr_mmu_backing_page_init()
280 err = -ENOMEM; in pvr_mmu_backing_page_init()
284 page->dma_addr = dma_addr; in pvr_mmu_backing_page_init()
285 page->host_ptr = host_ptr; in pvr_mmu_backing_page_init()
286 page->pvr_dev = pvr_dev; in pvr_mmu_backing_page_init()
287 page->raw_page = raw_page; in pvr_mmu_backing_page_init()
288 kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL); in pvr_mmu_backing_page_init()
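A minimal sketch of the three-step sequence documented above (allocate, map to CPU, map to DMA), assuming <linux/gfp.h>, <linux/vmalloc.h> and <linux/dma-mapping.h> are available; the -ENOMEM returns mirror the documented contract, while the write-combined protection and DMA direction are assumptions, not facts taken from the file:

static int example_backing_page_alloc(struct device *dev,
				      dma_addr_t *dma_addr_out,
				      void **host_ptr_out)
{
	/* 1. Allocate a single zeroed page. */
	struct page *raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL);
	dma_addr_t dma_addr;
	void *host_ptr;

	if (!raw_page)
		return -ENOMEM;

	/* 2. Map the page to the CPU (assumed write-combined). */
	host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!host_ptr)
		goto err_free_page;

	/* 3. Map the page to DMA-space (page table data flows CPU -> device). */
	dma_addr = dma_map_page(dev, raw_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		goto err_vunmap;

	*dma_addr_out = dma_addr;
	*host_ptr_out = host_ptr;
	return 0;

err_vunmap:
	vunmap(host_ptr);
err_free_page:
	__free_page(raw_page);
	return -ENOMEM;
}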
302 * pvr_mmu_backing_page_fini() - Tear down an MMU backing page.
303 * @page: Target backing page.
308 * 1. Unmap the page from DMA-space,
309 * 2. Unmap the page from the CPU, and
310 * 3. Free the page.
312 * It also zeros @page.
314 * It is a no-op to call this function a second (or further) time on any @page.
317 pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page) in pvr_mmu_backing_page_fini() argument
322 if (!page->pvr_dev) in pvr_mmu_backing_page_fini()
325 dev = from_pvr_device(page->pvr_dev)->dev; in pvr_mmu_backing_page_fini()
327 dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE, in pvr_mmu_backing_page_fini()
330 kmemleak_free(page->host_ptr); in pvr_mmu_backing_page_fini()
331 vunmap(page->host_ptr); in pvr_mmu_backing_page_fini()
333 __free_page(page->raw_page); in pvr_mmu_backing_page_fini()
335 memset(page, 0, sizeof(*page)); in pvr_mmu_backing_page_fini()
339 * pvr_mmu_backing_page_sync() - Flush an MMU backing page from the CPU to the
341 * @page: Target backing page.
348 * make to the backing page in the immediate future.
351 pvr_mmu_backing_page_sync(struct pvr_mmu_backing_page *page, u32 flags) in pvr_mmu_backing_page_sync() argument
353 struct pvr_device *pvr_dev = page->pvr_dev; in pvr_mmu_backing_page_sync()
363 dev = from_pvr_device(pvr_dev)->dev; in pvr_mmu_backing_page_sync()
365 dma_sync_single_for_device(dev, page->dma_addr, in pvr_mmu_backing_page_sync()
372 * DOC: Raw page tables
389 * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table.
392 * This type is a structure for type-checking purposes. At compile-time, its
397 * .. flat-table::
399 * :stub-columns: 1
401 * * - 31..4
402 * - **Level 1 Page Table Base Address:** Bits 39..12 of the L1
403 * page table base address, which is 4KiB aligned.
405 * * - 3..2
406 * - *(reserved)*
408 * * - 1
409 * - **Pending:** When valid bit is not set, indicates that a valid
411 * the entry. This is used to support page demand mapping of
414 * * - 0
415 * - **Valid:** Indicates that the entry contains a valid L1 page
417 * the page would result in a page fault.
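As an illustration of the layout described above, a valid entry could be packed with the generic <linux/bitfield.h> helpers; this is a sketch, not the driver's own PVR_PAGE_TABLE_FIELD_PREP() machinery:

static u32 example_l2_entry_pack(dma_addr_t l1_table_dma_addr)
{
	/* Bits 31..4 carry bits 39..12 of the 4KiB-aligned L1 table address;
	 * bit 1 is "pending" (left clear here) and bit 0 is "valid". */
	return FIELD_PREP(GENMASK(31, 4), l1_table_dma_addr >> 12) |
	       FIELD_PREP(BIT(0), 1);
}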
432 * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2
433 * page table.
434 * @entry: Target raw level 2 page table entry.
435 * @child_table_dma_addr: DMA address of the level 1 page table to be
447 WRITE_ONCE(entry->val, in pvr_page_table_l2_entry_raw_set()
456 WRITE_ONCE(entry->val, 0); in pvr_page_table_l2_entry_raw_clear()
460 * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table.
463 * This type is a structure for type-checking purposes. At compile-time, its
468 * .. flat-table::
470 * :stub-columns: 1
472 * * - 63..41
473 * - *(reserved)*
475 * * - 40
476 * - **Pending:** When valid bit is not set, indicates that a valid entry
478 * This is used to support page demand mapping of memory.
480 * * - 39..5
481 * - **Level 0 Page Table Base Address:** The way this value is
482 * interpreted depends on the page size. Bits not specified in the
483 * table below (e.g. bits 11..5 for page size 4KiB) should be
486 * This table shows the bits used in an L1 page table entry to
487 * represent the Physical Table Base Address for a given Page Size.
488 * Since each L1 page table entry covers 2MiB of address space, the
489 * maximum page size is 2MiB.
491 * .. flat-table::
493 * :header-rows: 1
494 * :stub-columns: 1
496 * * - Page size
497 * - L0 page table base address bits
498 * - Number of L0 page table entries
499 * - Size of L0 page table
501 * * - 4KiB
502 * - 39..12
503 * - 512
504 * - 4KiB
506 * * - 16KiB
507 * - 39..10
508 * - 128
509 * - 1KiB
511 * * - 64KiB
512 * - 39..8
513 * - 32
514 * - 256B
516 * * - 256KiB
517 * - 39..6
518 * - 8
519 * - 64B
521 * * - 1MiB
522 * - 39..5 (4 = '0')
523 * - 2
524 * - 16B
526 * * - 2MiB
527 * - 39..5 (4..3 = '00')
528 * - 1
529 * - 8B
531 * * - 4
532 * - *(reserved)*
534 * * - 3..1
535 * - **Page Size:** Sets the page size, from 4KiB to 2MiB.
537 * * - 0
538 * - **Valid:** Indicates that the entry contains a valid L0 page table.
539 * If the valid bit is not set, then an attempted use of the page would
540 * result in a page fault.
555 * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1
556 * page table.
557 * @entry: Target raw level 1 page table entry.
558 * @child_table_dma_addr: DMA address of the level 0 page table to be
568 WRITE_ONCE(entry->val, in pvr_page_table_l1_entry_raw_set()
573 * The use of a 4K-specific macro here is correct. It is in pvr_page_table_l1_entry_raw_set()
574 * a future optimization to allocate sub-host-page-sized in pvr_page_table_l1_entry_raw_set()
576 * page table address is aligned to the size of the in pvr_page_table_l1_entry_raw_set()
585 WRITE_ONCE(entry->val, 0); in pvr_page_table_l1_entry_raw_clear()
589 * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table.
592 * This type is a structure for type-checking purposes. At compile-time, its
597 * .. flat-table::
599 * :stub-columns: 1
601 * * - 63
602 * - *(reserved)*
604 * * - 62
605 * - **PM/FW Protect:** Indicates a protected region which only the
608 * * - 61..40
609 * - **VP Page (High):** Virtual-physical page used for Parameter Manager
610 * (PM) memory. This field is only used if the additional level of PB
611 * virtualization is enabled. The VP Page field is needed by the PM in
615 * value is always aligned to the 4KiB page size.
617 * * - 39..12
618 * - **Physical Page Address:** The way this value is interpreted depends
619 * on the page size. Bits not specified in the table below (e.g. bits
620 * 20..12 for page size 2MiB) should be considered reserved.
622 * This table shows the bits used in an L0 page table entry to represent
623 * the Physical Page Address for a given page size (as defined in the
624 * associated L1 page table entry).
626 * .. flat-table::
628 * :header-rows: 1
629 * :stub-columns: 1
631 * * - Page size
632 * - Physical address bits
634 * * - 4KiB
635 * - 39..12
637 * * - 16KiB
638 * - 39..14
640 * * - 64KiB
641 * - 39..16
643 * * - 256KiB
644 * - 39..18
646 * * - 1MiB
647 * - 39..20
649 * * - 2MiB
650 * - 39..21
652 * * - 11..6
653 * - **VP Page (Low):** Continuation of VP Page (High).
655 * * - 5
656 * - **Pending:** When valid bit is not set, indicates that a valid entry
658 * This is used to support page demand mapping of memory.
660 * * - 4
661 * - **PM Src:** Set on Parameter Manager (PM) allocated page table
665 * * - 3
666 * - **SLC Bypass Control:** Specifies requests to this page should bypass
667 * the System Level Cache (SLC), if enabled in SLC configuration.
669 * * - 2
670 * - **Cache Coherency:** Indicates that the page is coherent (i.e. it
674 * * - 1
675 * - **Read Only:** If set, this bit indicates that the page is read only.
676 * An attempted write to this page would result in a write-protection
679 * * - 0
680 * - **Valid:** Indicates that the entry contains a valid page. If the
681 * valid bit is not set, then an attempted use of the page would result
682 * in a page fault.
691 * struct pvr_page_flags_raw - The configurable flags from a single entry in a
692 * level 0 page table.
715 * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0
716 * page table.
717 * @entry: Target raw level 0 page table entry.
718 * @dma_addr: DMA address of the physical page to be associated with @entry.
733 WRITE_ONCE(entry->val, PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) | in pvr_page_table_l0_entry_raw_set()
742 WRITE_ONCE(entry->val, 0); in pvr_page_table_l0_entry_raw_clear()
746 * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page
748 * @read_only: This page is read-only (see: Read Only).
749 * @cache_coherent: This page does not require cache flushes (see: Cache
751 * @slc_bypass: This page bypasses the device cache (see: SLC Bypass Control).
752 * @pm_fw_protect: This page is only for use by the firmware or Parameter
778 * struct pvr_page_table_l2_raw - The raw data of a level 2 page table.
780 * This type is a structure for type-checking purposes. At compile-time, its
791 * struct pvr_page_table_l1_raw - The raw data of a level 1 page table.
793 * This type is a structure for type-checking purposes. At compile-time, its
804 * struct pvr_page_table_l0_raw - The raw data of a level 0 page table.
806 * This type is a structure for type-checking purposes. At compile-time, its
811 * The size of level 0 page tables is variable depending on the page size
812 * specified in the associated level 1 page table entry. Since the device
813 * page size in use is pegged to the host page size, it cannot vary at
815 * number of entries for the current device page size. **You should never
826 * DOC: Mirror page tables
830 * We pre-declare these types because they cross-depend on pointers to each
837 * struct pvr_page_table_l2 - A wrapped level 2 page table.
843 * A level 2 page table forms the root of the page table tree structure, so
848 * @entries: The children of this node in the page table tree
863 * in this table. This value is essentially a refcount - the table is
871 * pvr_page_table_l2_init() - Initialize a level 2 page table.
872 * @table: Target level 2 page table.
880 * * Any error encountered while initializing &table->backing_page using
887 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l2_init()
891 * pvr_page_table_l2_fini() - Tear down a level 2 page table.
892 * @table: Target level 2 page table.
899 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l2_fini()
903 * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the
905 * @table: Target level 2 page table.
912 * If child level 1 page tables of @table also need to be flushed, this should
918 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS); in pvr_page_table_l2_sync()
922 * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2
923 * page table.
924 * @table: Target level 2 page table.
937 return table->backing_page.host_ptr; in pvr_page_table_l2_get_raw()
941 * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent
942 * of a mirror level 2 page table.
943 * @table: Target level 2 page table.
946 * Technically this function returns a pointer to a slot in a raw level 2 page
956 * A pointer to the requested raw level 2 page table entry.
961 return &pvr_page_table_l2_get_raw(table)->entries[idx]; in pvr_page_table_l2_get_entry_raw()
965 * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is
967 * @table: Target level 2 page table.
984 * struct pvr_page_table_l1 - A wrapped level 1 page table.
992 * @entries: The children of this node in the page table tree
1007 * @parent: The parent of this node in the page table tree structure.
1011 * Only valid when the L1 page table is active. When the L1 page table
1018 * @next_free: Pointer to the next L1 page table to take/free.
1020 * Used to form a linked list of L1 page tables. This is used
1021 * when preallocating tables and when the page table has been
1035 * in this table. This value is essentially a refcount - the table is
1043 * pvr_page_table_l1_init() - Initialize a level 1 page table.
1044 * @table: Target level 1 page table.
1048 * valid. It must be inserted into the page table tree structure with
1056 * * Any error encountered while initializing &table->backing_page using
1063 table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l1_init()
1065 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l1_init()
1069 * pvr_page_table_l1_free() - Tear down a level 1 page table.
1070 * @table: Target level 1 page table.
1079 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l1_free()
1084 * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the
1086 * @table: Target level 1 page table.
1093 * If child level 0 page tables of @table also need to be flushed, this should
1099 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS); in pvr_page_table_l1_sync()
1103 * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1
1104 * page table.
1105 * @table: Target level 1 page table.
1118 return table->backing_page.host_ptr; in pvr_page_table_l1_get_raw()
1122 * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent
1123 * of a mirror level 1 page table.
1124 * @table: Target level 1 page table.
1127 * Technically this function returns a pointer to a slot in a raw level 1 page
1137 * A pointer to the requested raw level 1 page table entry.
1142 return &pvr_page_table_l1_get_raw(table)->entries[idx]; in pvr_page_table_l1_get_entry_raw()
1146 * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is
1148 * @table: Target level 1 page table.
1165 * struct pvr_page_table_l0 - A wrapped level 0 page table.
1171 * There is no mirror representation of an individual page, so this type has no
1183 * @parent: The parent of this node in the page table tree structure.
1187 * Only valid when the L0 page table is active. When the L0 page table
1194 * @next_free: Pointer to the next L0 page table to take/free.
1196 * Used to form a linked list of L0 page tables. This is used
1197 * when preallocating tables and when the page table has been
1211 * in this table. This value is essentially a refcount - the table is
1219 * pvr_page_table_l0_init() - Initialize a level 0 page table.
1220 * @table: Target level 0 page table.
1224 * valid. It must be inserted into the page table tree structure with
1232 * * Any error encountered while initializing &table->backing_page using
1239 table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l0_init()
1241 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l0_init()
1245 * pvr_page_table_l0_free() - Tear down a level 0 page table.
1246 * @table: Target level 0 page table.
1255 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l0_free()
1260 * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
1262 * @table: Target level 0 page table.
1276 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS); in pvr_page_table_l0_sync()
1280 * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
1281 * page table.
1282 * @table: Target level 0 page table.
1295 return table->backing_page.host_ptr; in pvr_page_table_l0_get_raw()
1299 * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
1300 * of a mirror level 0 page table.
1301 * @table: Target level 0 page table.
1304 * Technically this function returns a pointer to a slot in a raw level 0 page
1311 * returned pointer. This is especially important for level 0 page tables, which
1315 * A pointer to the requested raw level 0 page table entry.
1320 return &pvr_page_table_l0_get_raw(table)->entries[idx]; in pvr_page_table_l0_get_entry_raw()
1324 * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
1326 * @table: Target level 0 page table.
1343 * struct pvr_mmu_context - context holding data for operations at page
1344 * catalogue level, intended for use with a VM context.
1355 * struct pvr_page_table_ptr - A reference to a single physical page as indexed
1356 * by the page table structure.
1362 * @l1_table: A cached handle to the level 1 page table the
1368 * @l0_table: A cached handle to the level 0 page table the
1374 * @l2_idx: Index into the level 2 page table the context is
1380 * @l1_idx: Index into the level 1 page table the context is
1386 * @l0_idx: Index into the level 0 page table the context is
1393 * struct pvr_mmu_op_context - context holding data for individual
1394 * device-virtual mapping operations. Intended for use with a VM bind operation.
1404 * this context - these are currently pinned when initialising
1409 /** @sgt_offset: Start address of the device-virtual mapping. */
1413 * @l1_prealloc_tables: Preallocated l1 page table objects
1414 * used by this context when creating a page mapping. Linked list
1420 * @l0_prealloc_tables: Preallocated l0 page table objects
1421 * used by this context when creating a page mapping. Linked list
1430 * @l1_free_tables: Collects page table objects freed by unmap
1436 * @l0_free_tables: Collects page table objects freed by unmap
1443 * @curr_page: A reference to a single physical page as indexed by the
1444 * page table structure.
1449 * @sync_level_required: The maximum level of the page table tree
1460 * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
1461 * table into a level 2 page table.
1462 * @op_ctx: Target MMU op context pointing at the entry to insert the L1 page
1464 * @child_table: Target level 1 page table to be referenced by the new entry.
1477 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l2_insert()
1480 op_ctx->curr_page.l2_idx); in pvr_page_table_l2_insert()
1483 child_table->backing_page.dma_addr); in pvr_page_table_l2_insert()
1485 child_table->parent = l2_table; in pvr_page_table_l2_insert()
1486 child_table->parent_idx = op_ctx->curr_page.l2_idx; in pvr_page_table_l2_insert()
1487 l2_table->entries[op_ctx->curr_page.l2_idx] = child_table; in pvr_page_table_l2_insert()
1488 ++l2_table->entry_count; in pvr_page_table_l2_insert()
1489 op_ctx->curr_page.l1_table = child_table; in pvr_page_table_l2_insert()
1493 * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
1504 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l2_remove()
1507 op_ctx->curr_page.l1_table->parent_idx); in pvr_page_table_l2_remove()
1509 WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table); in pvr_page_table_l2_remove()
1513 l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL; in pvr_page_table_l2_remove()
1514 op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l2_remove()
1515 op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables; in pvr_page_table_l2_remove()
1516 op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table; in pvr_page_table_l2_remove()
1517 op_ctx->curr_page.l1_table = NULL; in pvr_page_table_l2_remove()
1519 --l2_table->entry_count; in pvr_page_table_l2_remove()
1523 * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
1524 * table into a level 1 page table.
1525 * @op_ctx: Target MMU op context pointing at the entry to insert the L0 page
1527 * @child_table: L0 page table to insert.
1540 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table, in pvr_page_table_l1_insert()
1541 op_ctx->curr_page.l1_idx); in pvr_page_table_l1_insert()
1544 child_table->backing_page.dma_addr); in pvr_page_table_l1_insert()
1546 child_table->parent = op_ctx->curr_page.l1_table; in pvr_page_table_l1_insert()
1547 child_table->parent_idx = op_ctx->curr_page.l1_idx; in pvr_page_table_l1_insert()
1548 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table; in pvr_page_table_l1_insert()
1549 ++op_ctx->curr_page.l1_table->entry_count; in pvr_page_table_l1_insert()
1550 op_ctx->curr_page.l0_table = child_table; in pvr_page_table_l1_insert()
1554 * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page
1559 * from its parent level 2 page table and destroyed.
1568 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent, in pvr_page_table_l1_remove()
1569 op_ctx->curr_page.l0_table->parent_idx); in pvr_page_table_l1_remove()
1571 WARN_ON(op_ctx->curr_page.l0_table->parent != in pvr_page_table_l1_remove()
1572 op_ctx->curr_page.l1_table); in pvr_page_table_l1_remove()
1576 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL; in pvr_page_table_l1_remove()
1577 op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l1_remove()
1578 op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables; in pvr_page_table_l1_remove()
1579 op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table; in pvr_page_table_l1_remove()
1580 op_ctx->curr_page.l0_table = NULL; in pvr_page_table_l1_remove()
1582 if (--op_ctx->curr_page.l1_table->entry_count == 0) { in pvr_page_table_l1_remove()
1583 /* Clear the parent L2 page table entry. */ in pvr_page_table_l1_remove()
1584 if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID) in pvr_page_table_l1_remove()
1590 * pvr_page_table_l0_insert() - Insert an entry referring to a physical page
1591 * into a level 0 page table.
1594 * @flags: Page options to be stored in the new entry.
1604 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, in pvr_page_table_l0_insert()
1605 op_ctx->curr_page.l0_idx); in pvr_page_table_l0_insert()
1610 * There is no entry to set here - we don't keep a mirror of in pvr_page_table_l0_insert()
1614 ++op_ctx->curr_page.l0_table->entry_count; in pvr_page_table_l0_insert()
1618 * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page
1623 * from its parent L1 page table and destroyed.
1632 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, in pvr_page_table_l0_remove()
1633 op_ctx->curr_page.l0_idx); in pvr_page_table_l0_remove()
1638 * There is no entry to clear here - we don't keep a mirror of in pvr_page_table_l0_remove()
1642 if (--op_ctx->curr_page.l0_table->entry_count == 0) { in pvr_page_table_l0_remove()
1643 /* Clear the parent L1 page table entry. */ in pvr_page_table_l0_remove()
1644 if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID) in pvr_page_table_l0_remove()
1650 * DOC: Page table index utilities
1654 * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a
1655 * device-virtual address.
1656 * @device_addr: Target device-virtual address.
1658 * This function does not perform any bounds checking - it is the caller's
1663 * The index into a level 2 page table corresponding to @device_addr.
1673 * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a
1674 * device-virtual address.
1675 * @device_addr: Target device-virtual address.
1677 * This function does not perform any bounds checking - it is the caller's
1682 * The index into a level 1 page table corresponding to @device_addr.
1692 * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a
1693 * device-virtual address.
1694 * @device_addr: Target device-virtual address.
1696 * This function does not perform any bounds checking - it is the caller's
1701 * The index into a level 0 page table corresponding to @device_addr.
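A hedged sketch (not the driver's macros) of the address split these three index helpers implement for the default 4KiB device page, derived from the per-level sizes documented earlier (each L1 entry covers 2MiB, and a 4KiB L0 table holds 512 entries):

/*
 *   L2 (page catalogue) index: bits 39..30, each entry covering 1GiB
 *   L1 (page directory) index: bits 29..21, each entry covering 2MiB
 *   L0 (page table)     index: bits 20..12, each entry covering 4KiB
 */
static u16 example_l2_idx(u64 device_addr)
{
	return (device_addr >> 30) & 0x3ff;
}

static u16 example_l1_idx(u64 device_addr)
{
	return (device_addr >> 21) & 0x1ff;
}

static u16 example_l0_idx(u64 device_addr)
{
	return (device_addr >> 12) & 0x1ff;
}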
1711 * DOC: High-level page table operations
1715 * pvr_page_table_l1_get_or_insert() - Retrieves (optionally inserting if
1716 * necessary) a level 1 page table from the specified level 2 page table entry.
1718 * @should_insert: [IN] Specifies whether new page tables should be inserted
1719 * when empty page table entries are encountered during traversal.
1725 * * -%ENXIO if a level 1 page table would have been inserted.
1728 * * Any error encountered while inserting the level 1 page table.
1735 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l1_get_or_insert()
1739 op_ctx->curr_page.l2_idx)) { in pvr_page_table_l1_get_or_insert()
1740 op_ctx->curr_page.l1_table = in pvr_page_table_l1_get_or_insert()
1741 l2_table->entries[op_ctx->curr_page.l2_idx]; in pvr_page_table_l1_get_or_insert()
1746 return -ENXIO; in pvr_page_table_l1_get_or_insert()
1749 table = op_ctx->map.l1_prealloc_tables; in pvr_page_table_l1_get_or_insert()
1751 return -ENOMEM; in pvr_page_table_l1_get_or_insert()
1754 op_ctx->map.l1_prealloc_tables = table->next_free; in pvr_page_table_l1_get_or_insert()
1755 table->next_free = NULL; in pvr_page_table_l1_get_or_insert()
1757 /* Ensure new table is fully written out before adding to L2 page table. */ in pvr_page_table_l1_get_or_insert()
1766 * pvr_page_table_l0_get_or_insert() - Retrieves (optionally inserting if
1767 * necessary) a level 0 page table from the specified level 1 page table entry.
1769 * @should_insert: [IN] Specifies whether new page tables should be inserted
1770 * when empty page table entries are encountered during traversal.
1776 * * -%ENXIO if a level 0 page table would have been inserted.
1779 * * Any error encountered while inserting the level 0 page table.
1787 if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table, in pvr_page_table_l0_get_or_insert()
1788 op_ctx->curr_page.l1_idx)) { in pvr_page_table_l0_get_or_insert()
1789 op_ctx->curr_page.l0_table = in pvr_page_table_l0_get_or_insert()
1790 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx]; in pvr_page_table_l0_get_or_insert()
1795 return -ENXIO; in pvr_page_table_l0_get_or_insert()
1798 table = op_ctx->map.l0_prealloc_tables; in pvr_page_table_l0_get_or_insert()
1800 return -ENOMEM; in pvr_page_table_l0_get_or_insert()
1803 op_ctx->map.l0_prealloc_tables = table->next_free; in pvr_page_table_l0_get_or_insert()
1804 table->next_free = NULL; in pvr_page_table_l0_get_or_insert()
1806 /* Ensure new table is fully written out before adding to L1 page table. */ in pvr_page_table_l0_get_or_insert()
1815 * pvr_mmu_context_create() - Create an MMU context.
1820 * * -%ENOMEM if no memory is available,
1829 return ERR_PTR(-ENOMEM); in pvr_mmu_context_create()
1831 err = pvr_page_table_l2_init(&ctx->page_table_l2, pvr_dev); in pvr_mmu_context_create()
1835 ctx->pvr_dev = pvr_dev; in pvr_mmu_context_create()
1841 * pvr_mmu_context_destroy() - Destroy an MMU context.
1846 pvr_page_table_l2_fini(&ctx->page_table_l2); in pvr_mmu_context_destroy()
1851 * pvr_mmu_get_root_table_dma_addr() - Get the DMA address of the root of the
1852 * page table structure behind a VM context.
1857 return ctx->page_table_l2.backing_page.dma_addr; in pvr_mmu_get_root_table_dma_addr()
1861 * pvr_page_table_l1_alloc() - Allocate an l1 page_table object.
1865 * * Newly created page table object on success, or
1866 * * -%ENOMEM if no memory is available,
1878 return ERR_PTR(-ENOMEM); in pvr_page_table_l1_alloc()
1880 err = pvr_page_table_l1_init(table, ctx->pvr_dev); in pvr_page_table_l1_alloc()
1890 * pvr_page_table_l0_alloc() - Allocate an l0 page_table object.
1894 * * Newly created page table object on success, or
1895 * * -%ENOMEM if no memory is available,
1907 return ERR_PTR(-ENOMEM); in pvr_page_table_l0_alloc()
1909 err = pvr_page_table_l0_init(table, ctx->pvr_dev); in pvr_page_table_l0_alloc()
1919 * pvr_mmu_op_context_require_sync() - Mark an MMU op context as requiring a
1920 * sync operation for the referenced page tables up to a specified level.
1922 * @level: Maximum page table level for which a sync is required.
1926 enum pvr_mmu_sync_level level) in pvr_mmu_op_context_require_sync() argument
1928 if (op_ctx->sync_level_required < level) in pvr_mmu_op_context_require_sync()
1929 op_ctx->sync_level_required = level; in pvr_mmu_op_context_require_sync()
1933 * pvr_mmu_op_context_sync_manual() - Trigger a sync of some or all of the
1934 * page tables referenced by a MMU op context.
1936 * @level: Maximum page table level to sync.
1940 * value of &op_ctx->sync_level_required as set by
1945 enum pvr_mmu_sync_level level) in pvr_mmu_op_context_sync_manual() argument
1948 * We sync the page table levels in ascending order (starting from the in pvr_mmu_op_context_sync_manual()
1952 WARN_ON(level < PVR_MMU_SYNC_LEVEL_NONE); in pvr_mmu_op_context_sync_manual()
1954 if (level <= PVR_MMU_SYNC_LEVEL_NONE) in pvr_mmu_op_context_sync_manual()
1957 if (op_ctx->curr_page.l0_table) in pvr_mmu_op_context_sync_manual()
1958 pvr_page_table_l0_sync(op_ctx->curr_page.l0_table); in pvr_mmu_op_context_sync_manual()
1960 if (level < PVR_MMU_SYNC_LEVEL_1) in pvr_mmu_op_context_sync_manual()
1963 if (op_ctx->curr_page.l1_table) in pvr_mmu_op_context_sync_manual()
1964 pvr_page_table_l1_sync(op_ctx->curr_page.l1_table); in pvr_mmu_op_context_sync_manual()
1966 if (level < PVR_MMU_SYNC_LEVEL_2) in pvr_mmu_op_context_sync_manual()
1969 pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2); in pvr_mmu_op_context_sync_manual()
1973 * pvr_mmu_op_context_sync_partial() - Trigger a sync of some or all of the
1974 * page tables referenced by a MMU op context.
1976 * @level: Requested page table level to sync up to (inclusive).
1978 * If @level is greater than the maximum level recorded by @op_ctx as requiring
1981 * Additionally, if @level is greater than or equal to the maximum level
1982 * recorded by @op_ctx as requiring a sync operation, that maximum level will be
1988 enum pvr_mmu_sync_level level) in pvr_mmu_op_context_sync_partial() argument
1991 * If the requested sync level is greater than or equal to the in pvr_mmu_op_context_sync_partial()
1992 * currently required sync level, we do two things: in pvr_mmu_op_context_sync_partial()
1995 * * Reset the required sync level since we are about to sync in pvr_mmu_op_context_sync_partial()
1998 if (level >= op_ctx->sync_level_required) { in pvr_mmu_op_context_sync_partial()
1999 level = op_ctx->sync_level_required; in pvr_mmu_op_context_sync_partial()
2000 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_sync_partial()
2003 pvr_mmu_op_context_sync_manual(op_ctx, level); in pvr_mmu_op_context_sync_partial()
2007 * pvr_mmu_op_context_sync() - Trigger a sync of every page table referenced by
2011 * The maximum level marked internally as requiring a sync will be reset so
2012 * that subsequent calls to this function will be no-ops unless @op_ctx is
2018 pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required); in pvr_mmu_op_context_sync()
2020 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_sync()
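A hedged usage sketch of the sync-level machinery above; PVR_MMU_SYNC_LEVEL_0 is assumed to exist alongside the NONE, 1 and 2 values referenced elsewhere in this listing:

	/* Record the highest page table level whose backing pages were
	 * written, then flush every dirtied level in one pass. */
	pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
	/* ... modify L0 entries ... */
	pvr_mmu_op_context_sync(op_ctx);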
2024 * pvr_mmu_op_context_load_tables() - Load pointers to tables in each level of
2025 * the page table tree structure needed to reference the physical page
2028 * @should_create: Specifies whether new page tables should be created when
2029 * empty page table entries are encountered during traversal.
2030 * @load_level_required: Maximum page table level to load.
2033 * sync level of @op_ctx as new page tables are created and inserted into their
2036 * Since there is only one root page table, it is technically incorrect to call
2038 * the root level number. However, this is not explicitly disallowed here.
2043 * @load_level_required >= 1 except -%ENXIO, or
2045 * @load_level_required >= 0 except -%ENXIO.
2053 op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_load_tables()
2055 op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_load_tables()
2060 op_ctx->curr_page.l1_table = NULL; in pvr_mmu_op_context_load_tables()
2063 op_ctx->curr_page.l0_table = NULL; in pvr_mmu_op_context_load_tables()
2065 /* Get or create L1 page table. */ in pvr_mmu_op_context_load_tables()
2070 * If @should_create is %false and no L1 page table was in pvr_mmu_op_context_load_tables()
2073 * -%ENXIO if @should_create is %false, there is no in pvr_mmu_op_context_load_tables()
2076 if (err == -ENXIO) in pvr_mmu_op_context_load_tables()
2083 /* Get or create L0 page table. */ in pvr_mmu_op_context_load_tables()
2088 * If @should_create is %false and no L0 page table was in pvr_mmu_op_context_load_tables()
2091 * -%ENXIO if @should_create is %false, there is no in pvr_mmu_op_context_load_tables()
2094 if (err == -ENXIO) in pvr_mmu_op_context_load_tables()
2098 * At this point, an L1 page table could have been in pvr_mmu_op_context_load_tables()
2100 * at inserting an L0 page table. In this instance, we in pvr_mmu_op_context_load_tables()
2101 * must remove the empty L1 page table ourselves as in pvr_mmu_op_context_load_tables()
2106 if (l1_head_before != op_ctx->map.l1_prealloc_tables) { in pvr_mmu_op_context_load_tables()
2120 if (l1_head_before != op_ctx->map.l1_prealloc_tables) in pvr_mmu_op_context_load_tables()
2122 else if (l0_head_before != op_ctx->map.l0_prealloc_tables) in pvr_mmu_op_context_load_tables()
2129 * pvr_mmu_op_context_set_curr_page() - Reassign the current page of an MMU op
2130 * context, syncing any page tables previously assigned to it which are no
2134 * @should_create: Specify whether new page tables should be created when
2135 * empty page table entries are encountered during traversal.
2150 op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2151 op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2152 op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2153 op_ctx->curr_page.l1_table = NULL; in pvr_mmu_op_context_set_curr_page()
2154 op_ctx->curr_page.l0_table = NULL; in pvr_mmu_op_context_set_curr_page()
2161 * pvr_mmu_op_context_next_page() - Advance the current page of an MMU op
2164 * @should_create: Specify whether new page tables should be created when
2165 * empty page table entries are encountered during traversal.
2168 * the state of the table references in @op_ctx is valid on return. If -%ENXIO
2170 * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
2172 * are invalid by comparing them to %NULL. Only the level 2 page table is
2173 * guaranteed to be valid, since it is the root of the page table tree structure.
2177 * * -%EPERM if the operation would wrap at the top of the page table
2179 * * -%ENXIO if @should_create is %false and a page table of any level would
2181 * * Any error returned while attempting to create missing page tables if
2190 if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X) in pvr_mmu_op_context_next_page()
2193 op_ctx->curr_page.l0_idx = 0; in pvr_mmu_op_context_next_page()
2196 if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE) in pvr_mmu_op_context_next_page()
2199 op_ctx->curr_page.l1_idx = 0; in pvr_mmu_op_context_next_page()
2202 if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE) in pvr_mmu_op_context_next_page()
2206 * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to in pvr_mmu_op_context_next_page()
2207 * zero here. However, that would wrap the top layer of the page table in pvr_mmu_op_context_next_page()
2212 "%s(%p) attempted to loop the top of the page table hierarchy", in pvr_mmu_op_context_next_page()
2214 return -EPERM; in pvr_mmu_op_context_next_page()
2221 /* Then load tables from the required level down. */ in pvr_mmu_op_context_next_page()
2227 * DOC: Single page operations
2231 * pvr_page_create() - Create a device-virtual memory page and insert it into
2232 * a level 0 page table.
2233 * @op_ctx: Target MMU op context pointing at the device-virtual address of the
2234 * target page.
2235 * @dma_addr: DMA address of the physical page backing the created page.
2236 * @flags: Page options saved on the level 0 page table entry for reading by
2241 * * -%EEXIST if the requested page already exists.
2247 /* Do not create a new page if one already exists. */ in pvr_page_create()
2248 if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, in pvr_page_create()
2249 op_ctx->curr_page.l0_idx)) { in pvr_page_create()
2250 return -EEXIST; in pvr_page_create()
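A hedged sketch of mapping one physical page with the helpers above, with the pvr_mmu_op_context_set_curr_page() signature inferred from its documentation earlier in this listing rather than quoted from the file:

	/* Point the op context at device_addr (creating any missing page
	 * tables), then insert one physical page there. */
	err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, true);
	if (!err)
		err = pvr_page_create(op_ctx, dma_addr, page_flags);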
2261 * pvr_page_destroy() - Destroy a device page after removing it from its
2262 * parent level 0 page table.
2268 /* Do nothing if the page does not exist. */ in pvr_page_destroy()
2269 if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, in pvr_page_destroy()
2270 op_ctx->curr_page.l0_idx)) { in pvr_page_destroy()
2274 /* Clear the parent L0 page table entry. */ in pvr_page_destroy()
2281 * pvr_mmu_op_context_destroy() - Destroy an MMU op context.
2287 op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_destroy()
2292 if (flush_caches && !op_ctx->map.sgt) in pvr_mmu_op_context_destroy()
2293 pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true); in pvr_mmu_op_context_destroy()
2295 while (op_ctx->map.l0_prealloc_tables) { in pvr_mmu_op_context_destroy()
2296 struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_destroy()
2298 op_ctx->map.l0_prealloc_tables = in pvr_mmu_op_context_destroy()
2299 op_ctx->map.l0_prealloc_tables->next_free; in pvr_mmu_op_context_destroy()
2303 while (op_ctx->map.l1_prealloc_tables) { in pvr_mmu_op_context_destroy()
2304 struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_destroy()
2306 op_ctx->map.l1_prealloc_tables = in pvr_mmu_op_context_destroy()
2307 op_ctx->map.l1_prealloc_tables->next_free; in pvr_mmu_op_context_destroy()
2311 while (op_ctx->unmap.l0_free_tables) { in pvr_mmu_op_context_destroy()
2312 struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables; in pvr_mmu_op_context_destroy()
2314 op_ctx->unmap.l0_free_tables = in pvr_mmu_op_context_destroy()
2315 op_ctx->unmap.l0_free_tables->next_free; in pvr_mmu_op_context_destroy()
2319 while (op_ctx->unmap.l1_free_tables) { in pvr_mmu_op_context_destroy()
2320 struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables; in pvr_mmu_op_context_destroy()
2322 op_ctx->unmap.l1_free_tables = in pvr_mmu_op_context_destroy()
2323 op_ctx->unmap.l1_free_tables->next_free; in pvr_mmu_op_context_destroy()
2331 * pvr_mmu_op_context_create() - Create an MMU op context.
2334 * @sgt_offset: Start offset of the requested device-virtual memory mapping.
2335 * @size: Size in bytes of the requested device-virtual memory mapping. For an
2336 * unmapping, this should be zero so that no page tables are allocated.
2340 * * -%ENOMEM if no memory is available,
2353 return ERR_PTR(-ENOMEM); in pvr_mmu_op_context_create()
2355 op_ctx->mmu_ctx = ctx; in pvr_mmu_op_context_create()
2356 op_ctx->map.sgt = sgt; in pvr_mmu_op_context_create()
2357 op_ctx->map.sgt_offset = sgt_offset; in pvr_mmu_op_context_create()
2358 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_create()
2362 * The number of page table objects we need to prealloc is in pvr_mmu_op_context_create()
2370 const u32 l1_count = l1_end_idx - l1_start_idx + 1; in pvr_mmu_op_context_create()
2373 const u32 l0_count = l0_end_idx - l0_start_idx + 1; in pvr_mmu_op_context_create()
2376 * Alloc and push page table entries until we have enough of in pvr_mmu_op_context_create()
2388 l1_tmp->next_free = op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_create()
2389 op_ctx->map.l1_prealloc_tables = l1_tmp; in pvr_mmu_op_context_create()
2400 l0_tmp->next_free = op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_create()
2401 op_ctx->map.l0_prealloc_tables = l0_tmp; in pvr_mmu_op_context_create()
2414 * pvr_mmu_op_context_unmap_curr_page() - Unmap pages from a memory context
2415 * starting from the current page of an MMU op context.
2416 * @op_ctx: Target MMU op context pointing at the first page to unmap.
2422 * pvr_mmu_op_context_next_page() (except -%ENXIO).
2434 * Destroy first page outside loop, as it doesn't require a page in pvr_mmu_op_context_unmap_curr_page()
2435 * advance beforehand. If the L0 page table reference in in pvr_mmu_op_context_unmap_curr_page()
2436 * @op_ctx.curr_page is %NULL, there cannot be a mapped page at in pvr_mmu_op_context_unmap_curr_page()
2439 if (op_ctx->curr_page.l0_table) in pvr_mmu_op_context_unmap_curr_page()
2442 for (u64 page = 1; page < nr_pages; ++page) { in pvr_mmu_op_context_unmap_curr_page() local
2445 * If the page table tree structure at @op_ctx.curr_page is in pvr_mmu_op_context_unmap_curr_page()
2452 if (err == -ENXIO) in pvr_mmu_op_context_unmap_curr_page()
2464 * pvr_mmu_unmap() - Unmap pages from a memory context.
2466 * @device_addr: First device-virtual address to unmap.
2469 * The total amount of device-virtual memory unmapped is
2489 * pvr_mmu_map_sgl() - Map part of a scatter-gather table entry to
2490 * device-virtual memory.
2491 * @op_ctx: Target MMU op context pointing to the first page that should be
2493 * @sgl: Target scatter-gather table entry.
2495 * from @sgl which is CPU page-aligned.
2496 * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
2497 * of the device page size.
2498 * @page_flags: Page options to be applied to every device-virtual memory page
2503 * * -%EINVAL if the range specified by @offset and @size is not completely
2505 * * Any error encountered while creating a page with pvr_page_create(), or
2517 unsigned int page; in pvr_mmu_map_sgl() local
2520 if (size > dma_len || offset > dma_len - size) in pvr_mmu_map_sgl()
2521 return -EINVAL; in pvr_mmu_map_sgl()
2527 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy)); in pvr_mmu_map_sgl()
2530 * Create first page outside loop, as it doesn't require a page advance in pvr_mmu_map_sgl()
2537 for (page = 1; page < pages; ++page) { in pvr_mmu_map_sgl()
2552 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page)); in pvr_mmu_map_sgl()
2553 err = pvr_mmu_op_context_unmap_curr_page(op_ctx, page); in pvr_mmu_map_sgl()
2559 * pvr_mmu_map() - Map an object's virtual memory to physical memory.
2561 * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
2562 * of the device page size.
2564 * @device_addr: Virtual device address to map to. Must be device page-aligned.
2585 if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK) in pvr_mmu_map()
2586 return -EINVAL; in pvr_mmu_map()
2590 return -EINVAL; in pvr_mmu_map()
2592 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy)); in pvr_mmu_map()
2599 for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) { in pvr_mmu_map()
2603 if (sgl_len <= op_ctx->map.sgt_offset) { in pvr_mmu_map()
2604 op_ctx->map.sgt_offset -= sgl_len; in pvr_mmu_map()
2608 sgl_offset = op_ctx->map.sgt_offset; in pvr_mmu_map()
2609 map_sgl_len = min_t(u64, sgl_len - sgl_offset, size - mapped_size); in pvr_mmu_map()
2617 * Flag the L0 page table as requiring a flush when the MMU op in pvr_mmu_map()
2622 op_ctx->map.sgt_offset = 0; in pvr_mmu_map()
2634 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page)); in pvr_mmu_map()
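Finally, a hedged end-to-end sketch of the mapping path built from the helpers listed above; the argument order of pvr_mmu_map() is inferred from its parameter documentation and error handling is abbreviated:

	struct pvr_mmu_op_context *op_ctx =
		pvr_mmu_op_context_create(ctx, sgt, sgt_offset, size);

	if (IS_ERR(op_ctx))
		return PTR_ERR(op_ctx);

	/* Map 'size' bytes of the SGT at device_addr, then release the op
	 * context, which also triggers an MMU cache flush when required. */
	err = pvr_mmu_map(op_ctx, size, flags, device_addr);
	pvr_mmu_op_context_destroy(op_ctx);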