Lines Matching +full:vm +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
29 * This is the "top level" datatype in the VM code. It's exposed in the public
34 * struct pvr_vm_context - Context type used to represent a single VM.
49 /** @lock: Global lock on this VM. */
62 * @dummy_gem: GEM object to enable VM reservation. All private BOs
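From the members referenced throughout the matches below, the context's shape can be reconstructed roughly as follows. This is an abridged sketch, not the real definition; the member order, and any members that never appear in the matches, are assumptions.

	struct pvr_vm_context {
		struct pvr_device *pvr_dev;            /* owning device */
		struct pvr_mmu_context *mmu_ctx;       /* page-table state */
		struct pvr_fw_object *fw_mem_ctx_obj;  /* firmware memory context */
		struct drm_gpuvm gpuvm_mgr;            /* DRM GPU-VA manager */
		struct drm_gem_object dummy_gem;       /* provides the shared dma_resv */
		struct mutex lock;                     /* global lock on this VM */
		struct kref ref_count;
		/* ... */
	};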
77 kref_get(&vm_ctx->ref_count); in pvr_vm_context_get()
83 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
84 * page table structure behind a VM context.
85 * @vm_ctx: Target VM context.
89 return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx); in pvr_vm_get_page_table_root_addr()
93 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
94 * @vm_ctx: Target VM context.
103 return vm_ctx->dummy_gem.resv; in pvr_vm_get_dma_resv()
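A minimal caller sketch: because the private BOs in this VM share the reservation object exposed here (per the @dummy_gem comment above), locking it serialises against all of them. A valid vm_ctx is assumed.

	struct dma_resv *resv = pvr_vm_get_dma_resv(vm_ctx);

	dma_resv_lock(resv, NULL);
	/* ... operate on state covered by the VM-wide reservation ... */
	dma_resv_unlock(resv);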
111 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
126 * struct pvr_vm_bind_op - Context of a map/unmap operation.
129 /** @type: Map or unmap. */
132 /** @pvr_obj: Object associated with mapping (map only). */
136 * @vm_ctx: VM context where the mapping will be created or destroyed.
169 /** @device_addr: Device-virtual address at the start of the mapping. */
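Reconstructing again from the accesses below, the bind-op context looks approximately like this; member order and exact types are inferred, not taken from the source.

	struct pvr_vm_bind_op {
		enum pvr_vm_bind_type type;            /* PVR_VM_BIND_TYPE_MAP or _UNMAP */
		struct pvr_gem_object *pvr_obj;        /* backing object (map only) */
		struct pvr_vm_context *vm_ctx;
		struct pvr_mmu_op_context *mmu_op_ctx;
		struct drm_gpuvm_bo *gpuvm_bo;
		struct pvr_vm_gpuva *new_va, *prev_va, *next_va; /* preallocated nodes */
		u64 device_addr;
		u64 size;
		u64 offset;                            /* offset into pvr_obj (map only) */
	};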
177 * pvr_vm_bind_op_exec() - Execute a single bind op.
187 switch (bind_op->type) { in pvr_vm_bind_op_exec()
189 return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, in pvr_vm_bind_op_exec()
190 bind_op, bind_op->device_addr, in pvr_vm_bind_op_exec()
191 bind_op->size, in pvr_vm_bind_op_exec()
192 gem_from_pvr_gem(bind_op->pvr_obj), in pvr_vm_bind_op_exec()
193 bind_op->offset); in pvr_vm_bind_op_exec()
196 return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, in pvr_vm_bind_op_exec()
197 bind_op, bind_op->device_addr, in pvr_vm_bind_op_exec()
198 bind_op->size); in pvr_vm_bind_op_exec()
206 return -EINVAL; in pvr_vm_bind_op_exec()
211 drm_gpuvm_bo_put(bind_op->gpuvm_bo); in pvr_vm_bind_op_fini()
213 kfree(bind_op->new_va); in pvr_vm_bind_op_fini()
214 kfree(bind_op->prev_va); in pvr_vm_bind_op_fini()
215 kfree(bind_op->next_va); in pvr_vm_bind_op_fini()
217 if (bind_op->pvr_obj) in pvr_vm_bind_op_fini()
218 pvr_gem_object_put(bind_op->pvr_obj); in pvr_vm_bind_op_fini()
220 if (bind_op->mmu_op_ctx) in pvr_vm_bind_op_fini()
221 pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx); in pvr_vm_bind_op_fini()
231 const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx; in pvr_vm_bind_op_map_init()
238 return -EINVAL; in pvr_vm_bind_op_map_init()
241 !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) { in pvr_vm_bind_op_map_init()
242 return -EINVAL; in pvr_vm_bind_op_map_init()
248 return -EINVAL; in pvr_vm_bind_op_map_init()
250 bind_op->type = PVR_VM_BIND_TYPE_MAP; in pvr_vm_bind_op_map_init()
252 dma_resv_lock(obj->resv, NULL); in pvr_vm_bind_op_map_init()
253 bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj); in pvr_vm_bind_op_map_init()
254 dma_resv_unlock(obj->resv); in pvr_vm_bind_op_map_init()
255 if (IS_ERR(bind_op->gpuvm_bo)) in pvr_vm_bind_op_map_init()
256 return PTR_ERR(bind_op->gpuvm_bo); in pvr_vm_bind_op_map_init()
258 bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL); in pvr_vm_bind_op_map_init()
259 bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); in pvr_vm_bind_op_map_init()
260 bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL); in pvr_vm_bind_op_map_init()
261 if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) { in pvr_vm_bind_op_map_init()
262 err = -ENOMEM; in pvr_vm_bind_op_map_init()
272 bind_op->mmu_op_ctx = in pvr_vm_bind_op_map_init()
273 pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size); in pvr_vm_bind_op_map_init()
274 err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx); in pvr_vm_bind_op_map_init()
276 bind_op->mmu_op_ctx = NULL; in pvr_vm_bind_op_map_init()
280 bind_op->pvr_obj = pvr_obj; in pvr_vm_bind_op_map_init()
281 bind_op->vm_ctx = vm_ctx; in pvr_vm_bind_op_map_init()
282 bind_op->device_addr = device_addr; in pvr_vm_bind_op_map_init()
283 bind_op->size = size; in pvr_vm_bind_op_map_init()
284 bind_op->offset = offset; in pvr_vm_bind_op_map_init()
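Taken together, the helpers above follow an init/exec/fini lifecycle. A minimal sketch of a map request, with the argument order assumed from the field assignments just shown and the locking around the exec step omitted:

	struct pvr_vm_bind_op bind_op = {0};
	int err;

	err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
				      offset, device_addr, size);
	if (err)
		return err;

	err = pvr_vm_bind_op_exec(&bind_op);
	pvr_vm_bind_op_fini(&bind_op);

	return err;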
303 return -EINVAL; in pvr_vm_bind_op_unmap_init()
305 bind_op->type = PVR_VM_BIND_TYPE_UNMAP; in pvr_vm_bind_op_unmap_init()
307 bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); in pvr_vm_bind_op_unmap_init()
308 bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL); in pvr_vm_bind_op_unmap_init()
309 if (!bind_op->prev_va || !bind_op->next_va) { in pvr_vm_bind_op_unmap_init()
310 err = -ENOMEM; in pvr_vm_bind_op_unmap_init()
314 bind_op->mmu_op_ctx = in pvr_vm_bind_op_unmap_init()
315 pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0); in pvr_vm_bind_op_unmap_init()
316 err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx); in pvr_vm_bind_op_unmap_init()
318 bind_op->mmu_op_ctx = NULL; in pvr_vm_bind_op_unmap_init()
322 bind_op->pvr_obj = pvr_obj; in pvr_vm_bind_op_unmap_init()
323 bind_op->vm_ctx = vm_ctx; in pvr_vm_bind_op_unmap_init()
324 bind_op->device_addr = device_addr; in pvr_vm_bind_op_unmap_init()
325 bind_op->size = size; in pvr_vm_bind_op_unmap_init()
336 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
350 struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj); in pvr_vm_gpuva_map()
354 if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK) in pvr_vm_gpuva_map()
355 return -EINVAL; in pvr_vm_gpuva_map()
357 err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags, in pvr_vm_gpuva_map()
358 op->map.va.addr); in pvr_vm_gpuva_map()
362 drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map); in pvr_vm_gpuva_map()
363 drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo); in pvr_vm_gpuva_map()
364 ctx->new_va = NULL; in pvr_vm_gpuva_map()
370 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
386 int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr, in pvr_vm_gpuva_unmap()
387 op->unmap.va->va.range); in pvr_vm_gpuva_unmap()
392 drm_gpuva_unmap(&op->unmap); in pvr_vm_gpuva_unmap()
393 drm_gpuva_unlink(op->unmap.va); in pvr_vm_gpuva_unmap()
394 kfree(to_pvr_vm_gpuva(op->unmap.va)); in pvr_vm_gpuva_unmap()
400 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
419 drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range); in pvr_vm_gpuva_remap()
420 err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range); in pvr_vm_gpuva_remap()
427 drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap); in pvr_vm_gpuva_remap()
429 if (op->remap.prev) { in pvr_vm_gpuva_remap()
430 pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj)); in pvr_vm_gpuva_remap()
431 drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo); in pvr_vm_gpuva_remap()
432 ctx->prev_va = NULL; in pvr_vm_gpuva_remap()
435 if (op->remap.next) { in pvr_vm_gpuva_remap()
436 pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj)); in pvr_vm_gpuva_remap()
437 drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo); in pvr_vm_gpuva_remap()
438 ctx->next_va = NULL; in pvr_vm_gpuva_remap()
441 drm_gpuva_unlink(op->remap.unmap->va); in pvr_vm_gpuva_remap()
442 kfree(to_pvr_vm_gpuva(op->remap.unmap->va)); in pvr_vm_gpuva_remap()
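The three callbacks above are the per-step handlers drm_gpuvm invokes while splitting and merging mappings. The driver is expected to register them through a drm_gpuvm_ops table roughly like the following; the table name is illustrative, while the field names come from drm_gpuvm.h.

	static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
		.sm_step_map = pvr_vm_gpuva_map,
		.sm_step_remap = pvr_vm_gpuva_remap,
		.sm_step_unmap = pvr_vm_gpuva_unmap,
	};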
454 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
471 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
473 * @vm_ctx: Target VM context.
478 * @device_addr + @size) to verify a device-virtual address range initially
479 * seems intuitive, but it produces a false negative when the address range
480 * is right at the end of device-virtual address space.
483 * @size is non-zero.
488 * bounds of the device-virtual address space, and @size is non-zero, or
496 drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) && in pvr_device_addr_and_size_are_valid()
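A concrete illustration of that corner case (the address-space width is made up for the example):

	/*
	 * With a VA space of [0, 1 << 40), mapping its final page gives
	 *
	 *   device_addr = (1ULL << 40) - PVR_DEVICE_PAGE_SIZE;
	 *   size        = PVR_DEVICE_PAGE_SIZE;
	 *
	 * device_addr + size == 1ULL << 40, one past the end of the space and
	 * therefore not itself a valid device-virtual address, so re-checking
	 * it with pvr_device_addr_is_valid() would reject a perfectly legal
	 * range. drm_gpuvm_range_valid() (used above) handles this case.
	 */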
519 fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx); in fw_mem_context_init()
520 fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET; in fw_mem_context_init()
524 * pvr_vm_create_context() - Create a new VM context.
527 * create a firmware memory context for the VM context
531 * * A handle to the newly-minted VM context on success,
532 * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
534 * * -%ENOMEM if allocation of the structure behind the opaque handle fails,
559 return ERR_PTR(-EINVAL); in pvr_vm_create_context()
564 return ERR_PTR(-ENOMEM); in pvr_vm_create_context()
566 vm_ctx->pvr_dev = pvr_dev; in pvr_vm_create_context()
568 vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev); in pvr_vm_create_context()
569 err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx); in pvr_vm_create_context()
576 fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj); in pvr_vm_create_context()
582 drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0); in pvr_vm_create_context()
583 drm_gpuvm_init(&vm_ctx->gpuvm_mgr, in pvr_vm_create_context()
584 is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM", in pvr_vm_create_context()
585 0, &pvr_dev->base, &vm_ctx->dummy_gem, in pvr_vm_create_context()
588 mutex_init(&vm_ctx->lock); in pvr_vm_create_context()
589 kref_init(&vm_ctx->ref_count); in pvr_vm_create_context()
594 pvr_mmu_context_destroy(vm_ctx->mmu_ctx); in pvr_vm_create_context()
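A sketch of how a caller might obtain and later release a context; the boolean argument is assumed to be the is_userspace_context flag seen in the drm_gpuvm_init() naming above.

	struct pvr_vm_context *vm_ctx = pvr_vm_create_context(pvr_dev, true);

	if (IS_ERR(vm_ctx))
		return PTR_ERR(vm_ctx);

	/* ... map/unmap against vm_ctx ... */

	pvr_vm_context_put(vm_ctx);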
603 * pvr_vm_context_release() - Teardown a VM context.
604 * @ref_count: Pointer to reference counter of the VM context.
615 if (vm_ctx->fw_mem_ctx_obj) in pvr_vm_context_release()
616 pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); in pvr_vm_context_release()
620 pvr_mmu_context_destroy(vm_ctx->mmu_ctx); in pvr_vm_context_release()
621 drm_gem_private_object_fini(&vm_ctx->dummy_gem); in pvr_vm_context_release()
622 mutex_destroy(&vm_ctx->lock); in pvr_vm_context_release()
624 drm_gpuvm_put(&vm_ctx->gpuvm_mgr); in pvr_vm_context_release()
628 * pvr_vm_context_lookup() - Look up VM context from handle
632 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
636 * * %NULL on failure (object does not exist in list, or is not a VM context)
643 xa_lock(&pvr_file->vm_ctx_handles); in pvr_vm_context_lookup()
644 vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); in pvr_vm_context_lookup()
646 xa_unlock(&pvr_file->vm_ctx_handles); in pvr_vm_context_lookup()
652 * pvr_vm_context_put() - Release a reference on a VM context
653 * @vm_ctx: Target VM context.
656 * * %true if the VM context was destroyed, or
663 return kref_put(&vm_ctx->ref_count, pvr_vm_context_release); in pvr_vm_context_put()
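Putting lookup and put together, a handle-based caller (an ioctl handler, say) would pair them like this; the handle variable is illustrative.

	struct pvr_vm_context *vm_ctx = pvr_vm_context_lookup(pvr_file, handle);

	if (!vm_ctx)
		return -EINVAL;

	/* ... use vm_ctx; the lookup took a reference for us ... */

	pvr_vm_context_put(vm_ctx);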
669 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated with the
673 * Removes all vm_contexts associated with @pvr_file from the device VM context
682 xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) { in pvr_destroy_vm_contexts_for_file()
684 pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle)); in pvr_destroy_vm_contexts_for_file()
691 struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; in pvr_vm_lock_extra()
692 struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; in pvr_vm_lock_extra()
695 return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); in pvr_vm_lock_extra()
699 * pvr_vm_map() - Map a section of physical memory into a section of
700 * device-virtual memory.
701 * @vm_ctx: Target VM context.
703 * @pvr_obj_offset: Offset into @pvr_obj to map from.
712 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
715 * is not device-virtual page-aligned,
726 .vm = &vm_ctx->gpuvm_mgr, in pvr_vm_map()
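A minimal usage sketch pairing this with pvr_vm_unmap() further down; both prototypes are assumed from the parameter documentation.

	int err = pvr_vm_map(vm_ctx, pvr_obj, pvr_obj_offset, device_addr, size);

	if (err)
		return err;

	/* ... the mapping is live; tear it down when no longer needed ... */

	err = pvr_vm_unmap(vm_ctx, device_addr, size);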
759 * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
761 * @vm_ctx: Target VM context.
768 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
774 * The vm_ctx->lock must be held when calling this function.
783 .vm = &vm_ctx->gpuvm_mgr, in pvr_vm_unmap_obj_locked()
814 * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
816 * @vm_ctx: Target VM context.
831 mutex_lock(&vm_ctx->lock); in pvr_vm_unmap_obj()
833 mutex_unlock(&vm_ctx->lock); in pvr_vm_unmap_obj()
839 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
840 * @vm_ctx: Target VM context.
856 mutex_lock(&vm_ctx->lock); in pvr_vm_unmap()
858 va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size); in pvr_vm_unmap()
860 pvr_obj = gem_to_pvr_gem(va->gem.obj); in pvr_vm_unmap()
862 va->va.addr, va->va.range); in pvr_vm_unmap()
864 err = -ENOENT; in pvr_vm_unmap()
867 mutex_unlock(&vm_ctx->lock); in pvr_vm_unmap()
873 * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
874 * @vm_ctx: Target VM context.
877 * all in order of ascending device-virtual address.
882 mutex_lock(&vm_ctx->lock); in pvr_vm_unmap_all()
888 va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, in pvr_vm_unmap_all()
889 vm_ctx->gpuvm_mgr.mm_start, in pvr_vm_unmap_all()
890 vm_ctx->gpuvm_mgr.mm_range); in pvr_vm_unmap_all()
894 pvr_obj = gem_to_pvr_gem(va->gem.obj); in pvr_vm_unmap_all()
897 va->va.addr, va->va.range)); in pvr_vm_unmap_all()
900 mutex_unlock(&vm_ctx->lock); in pvr_vm_unmap_all()
989 if (!args->pointer) { in pvr_static_data_areas_get()
990 args->size = sizeof(struct drm_pvr_dev_query_static_data_areas); in pvr_static_data_areas_get()
994 err = PVR_UOBJ_GET(query, args->size, args->pointer); in pvr_static_data_areas_get()
1012 err = PVR_UOBJ_SET(args->pointer, args->size, query); in pvr_static_data_areas_get()
1016 args->size = sizeof(query); in pvr_static_data_areas_get()
1028 if (!args->pointer) { in pvr_heap_info_get()
1029 args->size = sizeof(struct drm_pvr_dev_query_heap_info); in pvr_heap_info_get()
1033 err = PVR_UOBJ_GET(query, args->size, args->pointer); in pvr_heap_info_get()
1062 err = PVR_UOBJ_SET(args->pointer, args->size, query); in pvr_heap_info_get()
1066 args->size = sizeof(query); in pvr_heap_info_get()
1071 * pvr_heap_contains_range() - Determine if a given heap contains the specified
1072 * device-virtual address range.
1083 return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size; in pvr_heap_contains_range()
1087 * pvr_find_heap_containing() - Find a heap which contains the specified
1088 * device-virtual address range.
1104 if (check_add_overflow(start, size - 1, &end)) in pvr_find_heap_containing()
1127 * pvr_vm_find_gem_object() - Look up a buffer object from a given
1128 * device-virtual address.
1129 * @vm_ctx: [IN] Target VM context.
1152 mutex_lock(&vm_ctx->lock); in pvr_vm_find_gem_object()
1154 va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1); in pvr_vm_find_gem_object()
1158 pvr_obj = gem_to_pvr_gem(va->gem.obj); in pvr_vm_find_gem_object()
1162 *mapped_offset_out = va->gem.offset; in pvr_vm_find_gem_object()
1164 *mapped_size_out = va->va.range; in pvr_vm_find_gem_object()
1166 mutex_unlock(&vm_ctx->lock); in pvr_vm_find_gem_object()
1171 mutex_unlock(&vm_ctx->lock); in pvr_vm_find_gem_object()
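A sketch of a lookup through this helper, with the out-parameters and the NULL-on-failure behaviour assumed from the lines above.

	u64 mapped_offset, mapped_size;
	struct pvr_gem_object *pvr_obj =
		pvr_vm_find_gem_object(vm_ctx, device_addr,
				       &mapped_offset, &mapped_size);

	if (!pvr_obj)
		return -ENOENT;

	/* mapped_offset/mapped_size describe the mapped span of pvr_obj. */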
1178 * @vm_ctx: Target VM context.
1182 * * %NULL if this VM context does not have a firmware memory context.
1187 return vm_ctx->fw_mem_ctx_obj; in pvr_vm_get_fw_mem_context()