Lines Matching +full:multi +full:- +full:tt

1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-buf.h>
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
84 return mem_type_is_vram(res->mem_type); in resource_is_vram()
89 return resource_is_vram(bo->ttm.resource) || in xe_bo_is_vram()
90 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); in xe_bo_is_vram()
95 return bo->ttm.resource->mem_type == XE_PL_STOLEN; in xe_bo_is_stolen()
99 * xe_bo_has_single_placement - check if BO is placed only in one memory location
109 return bo->placement.num_placement == 1; in xe_bo_has_single_placement()
113 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
129 return bo->flags & XE_BO_FLAG_USER; in xe_bo_is_user()
138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
139 return tile->migrate; in mem_type_to_migrate()
144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region()
148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
149 return to_xe_ttm_vram_mgr(mgr)->vram; in res_to_mem_region()
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
158 bo->placements[*c] = (struct ttm_place) { in try_add_system()
181 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
183 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; in add_vram()
184 xe_assert(xe, vram && vram->usable_size); in add_vram()
185 io_size = vram->io_size; in add_vram()
190 if (io_size < vram->usable_size) { in add_vram()
206 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
208 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
215 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
217 bo->placements[*c] = (struct ttm_place) { in try_add_stolen()
236 return -EINVAL; in __xe_bo_placement_for_flags()
238 bo->placement = (struct ttm_placement) { in __xe_bo_placement_for_flags()
240 .placement = bo->placements, in __xe_bo_placement_for_flags()
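The fragments above follow the usual TTM pattern: each candidate region is appended to bo->placements and the array is then published through a struct ttm_placement. A minimal sketch of that pattern, assuming only the xe_bo fields visible in the fragments (the helper name and signature are invented for illustration):

static void example_add_tt_placement(struct xe_bo *bo, u32 *c)
{
	/* Append one candidate region to the BO's placement array. */
	bo->placements[(*c)++] = (struct ttm_place) {
		.mem_type = XE_PL_TT,
	};

	/* Publish the array to TTM through the BO's ttm_placement. */
	bo->placement = (struct ttm_placement) {
		.num_placement = *c,
		.placement = bo->placements,
	};
}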
258 if (tbo->type == ttm_bo_type_sg) { in xe_evict_flags()
259 placement->num_placement = 0; in xe_evict_flags()
271 switch (tbo->resource->mem_type) { in xe_evict_flags()
293 static int xe_tt_map_sg(struct ttm_tt *tt) in xe_tt_map_sg() argument
295 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_map_sg()
296 unsigned long num_pages = tt->num_pages; in xe_tt_map_sg()
299 XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); in xe_tt_map_sg()
301 if (xe_tt->sg) in xe_tt_map_sg()
304 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, in xe_tt_map_sg()
307 xe_sg_segment_size(xe_tt->dev), in xe_tt_map_sg()
312 xe_tt->sg = &xe_tt->sgt; in xe_tt_map_sg()
313 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
316 sg_free_table(xe_tt->sg); in xe_tt_map_sg()
317 xe_tt->sg = NULL; in xe_tt_map_sg()
324 static void xe_tt_unmap_sg(struct ttm_tt *tt) in xe_tt_unmap_sg() argument
326 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_unmap_sg()
328 if (xe_tt->sg) { in xe_tt_unmap_sg()
329 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, in xe_tt_unmap_sg()
331 sg_free_table(xe_tt->sg); in xe_tt_unmap_sg()
332 xe_tt->sg = NULL; in xe_tt_unmap_sg()
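The map/unmap pairing above can be sketched as below, with a bare device pointer and page array standing in for the struct xe_ttm_tt fields and UINT_MAX standing in for xe_sg_segment_size(); the helper names are invented:

static int example_map_pages(struct device *dev, struct page **pages,
			     unsigned long num_pages, struct sg_table *sgt)
{
	int ret;

	/* Build an sg_table covering the whole page array. */
	ret = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
						(unsigned long)num_pages << PAGE_SHIFT,
						UINT_MAX, GFP_KERNEL);
	if (ret)
		return ret;

	/* DMA-map it; on failure the table must be freed again. */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		sg_free_table(sgt);

	return ret;
}

static void example_unmap_pages(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(sgt);
}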
338 struct ttm_tt *tt = bo->ttm.ttm; in xe_bo_sg() local
339 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_bo_sg()
341 return xe_tt->sg; in xe_bo_sg()
349 struct xe_ttm_tt *tt; in xe_ttm_tt_create() local
354 tt = kzalloc(sizeof(*tt), GFP_KERNEL); in xe_ttm_tt_create()
355 if (!tt) in xe_ttm_tt_create()
358 tt->dev = xe->drm.dev; in xe_ttm_tt_create()
362 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
372 switch (bo->cpu_caching) { in xe_ttm_tt_create()
381 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); in xe_ttm_tt_create()
384 * Display scanout is always non-coherent with the CPU cache. in xe_ttm_tt_create()
387 * non-coherent and require a CPU:WC mapping. in xe_ttm_tt_create()
389 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || in xe_ttm_tt_create()
390 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
391 bo->flags & XE_BO_FLAG_PAGETABLE)) in xe_ttm_tt_create()
395 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { in xe_ttm_tt_create()
397 * Valid only for internally-created buffers, for in xe_ttm_tt_create()
400 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
404 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); in xe_ttm_tt_create()
406 kfree(tt); in xe_ttm_tt_create()
410 return &tt->ttm; in xe_ttm_tt_create()
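The caching choice can be condensed as below. The conditions are copied from the fragments; the resulting enum ttm_caching values are an assumption based on the surrounding comments, and the helper name is invented:

static enum ttm_caching example_pick_caching(const struct xe_device *xe,
					     const struct xe_bo *bo)
{
	enum ttm_caching caching = ttm_cached;

	/*
	 * Scanout buffers without an explicit cpu_caching, and page tables
	 * on graphics IP 12.70+, need a write-combined CPU mapping.
	 */
	if ((!bo->cpu_caching && (bo->flags & XE_BO_FLAG_SCANOUT)) ||
	    (xe->info.graphics_verx100 >= 1270 &&
	     (bo->flags & XE_BO_FLAG_PAGETABLE)))
		caching = ttm_write_combined;

	/* Internally-created buffers may instead require uncached. */
	if (bo->flags & XE_BO_FLAG_NEEDS_UC)
		caching = ttm_uncached;

	return caching;
}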
413 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, in xe_ttm_tt_populate() argument
419 * dma-bufs are not populated with pages, and the dma- in xe_ttm_tt_populate()
422 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_populate()
425 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); in xe_ttm_tt_populate()
432 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_unpopulate() argument
434 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_unpopulate()
437 xe_tt_unmap_sg(tt); in xe_ttm_tt_unpopulate()
439 return ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_unpopulate()
442 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_destroy() argument
444 ttm_tt_fini(tt); in xe_ttm_tt_destroy()
445 kfree(tt); in xe_ttm_tt_destroy()
453 return vres->used_visible_size == mem->size; in xe_ttm_resource_visible()
461 switch (mem->mem_type) { in xe_ttm_io_mem_reserve()
470 return -EINVAL; in xe_ttm_io_mem_reserve()
472 mem->bus.offset = mem->start << PAGE_SHIFT; in xe_ttm_io_mem_reserve()
474 if (vram->mapping && in xe_ttm_io_mem_reserve()
475 mem->placement & TTM_PL_FLAG_CONTIGUOUS) in xe_ttm_io_mem_reserve()
476 mem->bus.addr = (u8 __force *)vram->mapping + in xe_ttm_io_mem_reserve()
477 mem->bus.offset; in xe_ttm_io_mem_reserve()
479 mem->bus.offset += vram->io_start; in xe_ttm_io_mem_reserve()
480 mem->bus.is_iomem = true; in xe_ttm_io_mem_reserve()
483 mem->bus.caching = ttm_write_combined; in xe_ttm_io_mem_reserve()
489 return -EINVAL; in xe_ttm_io_mem_reserve()
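A worked example of the VRAM branch above, assuming 4 KiB pages; the numbers are illustrative and not taken from the source:

	/*
	 * For mem->start == 0x100 and vram->io_start == 0x4000000000:
	 *   mem->bus.offset = (0x100 << PAGE_SHIFT) + vram->io_start
	 *                   = 0x100000 + 0x4000000000
	 *                   = 0x4000100000        (is_iomem = true)
	 *   mem->bus.caching = ttm_write_combined
	 */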
498 struct drm_gem_object *obj = &bo->ttm.base; in xe_bo_trigger_rebind()
503 dma_resv_assert_held(bo->ttm.base.resv); in xe_bo_trigger_rebind()
505 if (!list_empty(&bo->ttm.base.gpuva.list)) { in xe_bo_trigger_rebind()
506 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, in xe_bo_trigger_rebind()
514 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
525 if (ctx->no_wait_gpu && in xe_bo_trigger_rebind()
526 !dma_resv_test_signaled(bo->ttm.base.resv, in xe_bo_trigger_rebind()
528 return -EBUSY; in xe_bo_trigger_rebind()
530 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_trigger_rebind()
532 ctx->interruptible, in xe_bo_trigger_rebind()
535 return -ETIME; in xe_bo_trigger_rebind()
556 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
567 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; in xe_bo_move_dmabuf()
568 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, in xe_bo_move_dmabuf()
570 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf()
574 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
576 if (new_res->mem_type == XE_PL_SYSTEM) in xe_bo_move_dmabuf()
579 if (ttm_bo->sg) { in xe_bo_move_dmabuf()
580 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
581 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
588 ttm_bo->sg = sg; in xe_bo_move_dmabuf()
589 xe_tt->sg = sg; in xe_bo_move_dmabuf()
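The move above amounts to dropping any stale exporter mapping and requesting a fresh one. A minimal sketch, assuming the attachment's reservation lock is already held as it is in the TTM move path (the helper name is invented):

static struct sg_table *example_remap_attachment(struct dma_buf_attachment *attach,
						 struct sg_table *old_sg)
{
	/* Release the previous mapping, if any. */
	if (old_sg)
		dma_buf_unmap_attachment(attach, old_sg, DMA_BIDIRECTIONAL);

	/* Ask the exporter for a new sg_table (may return an ERR_PTR). */
	return dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
}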
598 * xe_bo_move_notify - Notify subsystems of a pending move
612 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
618 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_move_notify()
619 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify()
620 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move_notify()
621 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move_notify()
630 return -EINVAL; in xe_bo_move_notify()
637 /* Don't call move_notify() for imported dma-bufs. */ in xe_bo_move_notify()
638 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) in xe_bo_move_notify()
639 dma_buf_move_notify(ttm_bo->base.dma_buf); in xe_bo_move_notify()
647 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
648 if (!list_empty(&bo->vram_userfault_link)) in xe_bo_move_notify()
649 list_del_init(&bo->vram_userfault_link); in xe_bo_move_notify()
650 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
661 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move()
663 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move()
664 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move()
665 struct ttm_tt *ttm = ttm_bo->ttm; in xe_bo_move()
675 /* Bo creation path, moving to system or TT. */ in xe_bo_move()
677 if (new_mem->mem_type == XE_PL_TT) in xe_bo_move()
684 if (ttm_bo->type == ttm_bo_type_sg) { in xe_bo_move()
692 (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); in xe_bo_move()
694 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : in xe_bo_move()
697 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || in xe_bo_move()
698 (!ttm && ttm_bo->type == ttm_bo_type_device); in xe_bo_move()
700 if (new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
711 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { in xe_bo_move()
717 * Failed multi-hop where the old_mem is still marked as in xe_bo_move()
721 new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
733 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
734 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
752 new_mem->mem_type == XE_PL_SYSTEM))) { in xe_bo_move()
753 hop->fpfn = 0; in xe_bo_move()
754 hop->lpfn = 0; in xe_bo_move()
755 hop->mem_type = XE_PL_TT; in xe_bo_move()
756 hop->flags = TTM_PL_FLAG_TEMPORARY; in xe_bo_move()
757 ret = -EMULTIHOP; in xe_bo_move()
761 if (bo->tile) in xe_bo_move()
762 migrate = bo->tile->migrate; in xe_bo_move()
764 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
768 migrate = xe->tiles[0].migrate; in xe_bo_move()
771 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); in xe_bo_move()
779 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
800 void __iomem *new_addr = vram->mapping + in xe_bo_move()
801 (new_mem->start << PAGE_SHIFT); in xe_bo_move()
803 if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { in xe_bo_move()
804 ret = -EINVAL; in xe_bo_move()
809 xe_assert(xe, new_mem->start == in xe_bo_move()
810 bo->placements->fpfn); in xe_bo_move()
812 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); in xe_bo_move()
819 if (mem_type_is_vram(new_mem->mem_type)) in xe_bo_move()
845 * bo->resource == NULL, so just attach the in xe_bo_move()
848 dma_resv_add_fence(ttm_bo->base.resv, fence, in xe_bo_move()
859 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && in xe_bo_move()
860 ttm_bo->ttm) { in xe_bo_move()
861 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
868 xe_tt_unmap_sg(ttm_bo->ttm); in xe_bo_move()
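The -EMULTIHOP handling above follows the generic TTM convention: the move callback fills in *hop and returns -EMULTIHOP so TTM bounces the buffer through an intermediate placement and then retries the move. A sketch of just that step (helper name invented):

static int example_request_hop_through_tt(struct ttm_place *hop)
{
	hop->fpfn = 0;
	hop->lpfn = 0;
	hop->mem_type = XE_PL_TT;
	hop->flags = TTM_PL_FLAG_TEMPORARY;

	/* TTM performs the intermediate move and calls back again. */
	return -EMULTIHOP;
}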
875 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
881 * suspend-resume.
903 if (WARN_ON(!bo->ttm.resource)) in xe_bo_evict_pinned()
904 return -EINVAL; in xe_bo_evict_pinned()
907 return -EINVAL; in xe_bo_evict_pinned()
912 ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); in xe_bo_evict_pinned()
916 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
917 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
918 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
919 ret = -ENOMEM; in xe_bo_evict_pinned()
924 ret = ttm_bo_populate(&bo->ttm, &ctx); in xe_bo_evict_pinned()
928 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_evict_pinned()
932 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_evict_pinned()
939 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_evict_pinned()
944 * xe_bo_restore_pinned() - Restore a pinned VRAM object
950 * suspend-resume.
961 struct ttm_place *place = &bo->placements[0]; in xe_bo_restore_pinned()
966 if (WARN_ON(!bo->ttm.resource)) in xe_bo_restore_pinned()
967 return -EINVAL; in xe_bo_restore_pinned()
970 return -EINVAL; in xe_bo_restore_pinned()
973 return -EINVAL; in xe_bo_restore_pinned()
975 if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo))) in xe_bo_restore_pinned()
976 return -EINVAL; in xe_bo_restore_pinned()
978 if (!mem_type_is_vram(place->mem_type)) in xe_bo_restore_pinned()
981 ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); in xe_bo_restore_pinned()
985 ret = ttm_bo_populate(&bo->ttm, &ctx); in xe_bo_restore_pinned()
989 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_restore_pinned()
993 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_restore_pinned()
1000 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_restore_pinned()
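A hedged usage sketch of the two pinned-BO helpers above: per their kernel-doc they are used across suspend-resume, eviction before suspend and restore after resume. The wrapper below is hypothetical:

static int example_pinned_suspend_resume(struct xe_bo *bo)
{
	int ret;

	ret = xe_bo_evict_pinned(bo);		/* backing store moves to system memory */
	if (ret)
		return ret;

	/* ... device suspends and later resumes ... */

	return xe_bo_restore_pinned(bo);	/* contents are copied back to VRAM */
}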
1011 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) in xe_ttm_io_mem_pfn()
1014 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_io_mem_pfn()
1015 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
1016 return (vram->io_start + cursor.start) >> PAGE_SHIFT; in xe_ttm_io_mem_pfn()
1027 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor()
1030 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1038 spin_lock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1039 locked = dma_resv_trylock(ttm_bo->base.resv); in xe_ttm_bo_lock_in_destructor()
1040 spin_unlock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1057 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); in xe_ttm_bo_release_notify()
1063 if (ttm_bo->base.resv != &ttm_bo->base._resv) in xe_ttm_bo_release_notify()
1075 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1082 dma_resv_replace_fences(ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1083 fence->context, in xe_ttm_bo_release_notify()
1090 dma_resv_unlock(ttm_bo->base.resv); in xe_ttm_bo_release_notify()
1100 * dma-buf attachment. in xe_ttm_bo_delete_mem_notify()
1102 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_ttm_bo_delete_mem_notify()
1103 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, in xe_ttm_bo_delete_mem_notify()
1106 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, in xe_ttm_bo_delete_mem_notify()
1108 ttm_bo->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1109 xe_tt->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1115 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge()
1117 if (ttm_bo->ttm) { in xe_ttm_bo_purge()
1121 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1132 if (ttm_bo->ttm) { in xe_ttm_bo_swap_notify()
1134 container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); in xe_ttm_bo_swap_notify()
1136 if (xe_tt->purgeable) in xe_ttm_bo_swap_notify()
1146 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory()
1155 if (!mem_type_is_vram(ttm_bo->resource->mem_type)) in xe_ttm_access_memory()
1156 return -EIO; in xe_ttm_access_memory()
1158 /* FIXME: Use GPU for non-visible VRAM */ in xe_ttm_access_memory()
1159 if (!xe_ttm_resource_visible(ttm_bo->resource)) in xe_ttm_access_memory()
1160 return -EIO; in xe_ttm_access_memory()
1162 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_access_memory()
1163 xe_res_first(ttm_bo->resource, offset & PAGE_MASK, in xe_ttm_access_memory()
1164 bo->size - (offset & PAGE_MASK), &cursor); in xe_ttm_access_memory()
1168 int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left); in xe_ttm_access_memory()
1170 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping + in xe_ttm_access_memory()
1179 bytes_left -= byte_count; in xe_ttm_access_memory()
1206 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy()
1210 if (bo->ttm.base.import_attach) in xe_ttm_bo_destroy()
1211 drm_prime_gem_destroy(&bo->ttm.base, NULL); in xe_ttm_bo_destroy()
1212 drm_gem_object_release(&bo->ttm.base); in xe_ttm_bo_destroy()
1214 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1217 if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) in xe_ttm_bo_destroy()
1218 xe_ggtt_remove_bo(tile->mem.ggtt, bo); in xe_ttm_bo_destroy()
1221 if (bo->client) in xe_ttm_bo_destroy()
1225 if (bo->vm && xe_bo_is_user(bo)) in xe_ttm_bo_destroy()
1226 xe_vm_put(bo->vm); in xe_ttm_bo_destroy()
1228 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1229 if (!list_empty(&bo->vram_userfault_link)) in xe_ttm_bo_destroy()
1230 list_del(&bo->vram_userfault_link); in xe_ttm_bo_destroy()
1231 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1260 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { in xe_gem_object_close()
1264 ttm_bo_set_bulk_move(&bo->ttm, NULL); in xe_gem_object_close()
1271 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; in xe_gem_fault()
1272 struct drm_device *ddev = tbo->base.dev; in xe_gem_fault()
1275 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; in xe_gem_fault()
1289 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in xe_gem_fault()
1293 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in xe_gem_fault()
1296 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in xe_gem_fault()
1301 if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) { in xe_gem_fault()
1302 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1303 if (list_empty(&bo->vram_userfault_link)) in xe_gem_fault()
1304 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1305 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1308 dma_resv_unlock(tbo->base.resv); in xe_gem_fault()
1319 struct ttm_buffer_object *ttm_bo = vma->vm_private_data; in xe_bo_vm_access()
1332 * xe_bo_read() - Read from an xe_bo
1346 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0); in xe_bo_read()
1348 ret = -EIO; in xe_bo_read()
1371 * xe_bo_alloc - Allocate storage for a struct xe_bo
1381 * ERR_PTR(-ENOMEM) on error.
1388 return ERR_PTR(-ENOMEM); in xe_bo_alloc()
1394 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1397 * Refer to xe_bo_alloc() documentation for valid use-cases.
1425 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1430 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1434 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1450 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1458 bo->ccs_cleared = false; in ___xe_bo_create_locked()
1459 bo->tile = tile; in ___xe_bo_create_locked()
1460 bo->size = size; in ___xe_bo_create_locked()
1461 bo->flags = flags; in ___xe_bo_create_locked()
1462 bo->cpu_caching = cpu_caching; in ___xe_bo_create_locked()
1463 bo->ttm.base.funcs = &xe_gem_object_funcs; in ___xe_bo_create_locked()
1464 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; in ___xe_bo_create_locked()
1465 INIT_LIST_HEAD(&bo->pinned_link); in ___xe_bo_create_locked()
1467 INIT_LIST_HEAD(&bo->client_link); in ___xe_bo_create_locked()
1469 INIT_LIST_HEAD(&bo->vram_userfault_link); in ___xe_bo_create_locked()
1471 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1479 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1481 xe_ttm_bo_destroy(&bo->ttm); in ___xe_bo_create_locked()
1488 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : in ___xe_bo_create_locked()
1489 &bo->placement; in ___xe_bo_create_locked()
1490 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1500 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. in ___xe_bo_create_locked()
1514 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in ___xe_bo_create_locked()
1521 dma_resv_unlock(bo->ttm.base.resv); in ___xe_bo_create_locked()
1527 bo->created = true; in ___xe_bo_create_locked()
1529 ttm_bo_set_bulk_move(&bo->ttm, bulk); in ___xe_bo_create_locked()
1531 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in ___xe_bo_create_locked()
1541 struct ttm_place *place = bo->placements; in __xe_bo_fixed_placement()
1544 return -EINVAL; in __xe_bo_fixed_placement()
1546 place->flags = TTM_PL_FLAG_CONTIGUOUS; in __xe_bo_fixed_placement()
1547 place->fpfn = start >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1548 place->lpfn = end >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1552 place->mem_type = XE_PL_VRAM0; in __xe_bo_fixed_placement()
1555 place->mem_type = XE_PL_VRAM1; in __xe_bo_fixed_placement()
1558 place->mem_type = XE_PL_STOLEN; in __xe_bo_fixed_placement()
1563 return -EINVAL; in __xe_bo_fixed_placement()
1566 bo->placement = (struct ttm_placement) { in __xe_bo_fixed_placement()
1603 &vm->lru_bulk_move : NULL, size, in __xe_bo_create_locked()
1608 bo->min_align = alignment; in __xe_bo_create_locked()
1619 bo->vm = vm; in __xe_bo_create_locked()
1621 if (bo->flags & XE_BO_FLAG_GGTT) { in __xe_bo_create_locked()
1625 if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) { in __xe_bo_create_locked()
1633 if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t))) in __xe_bo_create_locked()
1637 err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, in __xe_bo_create_locked()
1638 start + bo->size, U64_MAX); in __xe_bo_create_locked()
1640 err = xe_ggtt_insert_bo(t->mem.ggtt, bo); in __xe_bo_create_locked()
1768 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
1788 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
1803 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
1826 dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE; in xe_managed_bo_reinit_in_vram()
1829 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
1831 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
1832 (*src)->size, dst_flags); in xe_managed_bo_reinit_in_vram()
1836 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
1848 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset()
1850 if (res->mem_type == XE_PL_STOLEN) in vram_region_gpu_offset()
1853 return res_to_mem_region(res)->dpa_base; in vram_region_gpu_offset()
1857 * xe_bo_pin_external - pin an external BO
1860 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1871 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
1880 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
1881 list_add_tail(&bo->pinned_link, in xe_bo_pin_external()
1882 &xe->pinned.external_vram); in xe_bo_pin_external()
1883 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
1887 ttm_bo_pin(&bo->ttm); in xe_bo_pin_external()
1893 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin_external()
1900 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
1908 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
1912 * No reason we can't support pinning imported dma-bufs; we just don't in xe_bo_pin()
1913 * expect to pin an imported dma-buf. in xe_bo_pin()
1915 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
1930 bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { in xe_bo_pin()
1931 if (mem_type_is_vram(place->mem_type)) { in xe_bo_pin()
1932 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
1934 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
1935 vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; in xe_bo_pin()
1936 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); in xe_bo_pin()
1940 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_pin()
1941 spin_lock(&xe->pinned.lock); in xe_bo_pin()
1942 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
1943 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
1946 ttm_bo_pin(&bo->ttm); in xe_bo_pin()
1952 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin()
1958 * xe_bo_unpin_external - unpin an external BO
1961 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1971 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
1975 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
1976 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) in xe_bo_unpin_external()
1977 list_del_init(&bo->pinned_link); in xe_bo_unpin_external()
1978 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
1980 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin_external()
1986 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_unpin_external()
1991 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
1994 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
1997 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_unpin()
1998 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
1999 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2000 list_del_init(&bo->pinned_link); in xe_bo_unpin()
2001 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2003 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin()
2007 * xe_bo_validate() - Make sure the bo is in an allowed placement
2020 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2031 lockdep_assert_held(&vm->lock); in xe_bo_validate()
2039 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); in xe_bo_validate()
2044 if (bo->destroy == &xe_ttm_bo_destroy) in xe_bo_is_xe_bo()
2064 offset &= (PAGE_SIZE - 1); in __xe_bo_addr()
2067 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2075 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, in __xe_bo_addr()
2077 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); in __xe_bo_addr()
2083 if (!READ_ONCE(bo->ttm.pin_count)) in xe_bo_addr()
2090 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap()
2097 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2098 !force_contiguous(bo->flags))) in xe_bo_vmap()
2099 return -EINVAL; in xe_bo_vmap()
2101 if (!iosys_map_is_null(&bo->vmap)) in xe_bo_vmap()
2111 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
2115 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in xe_bo_vmap()
2117 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); in xe_bo_vmap()
2119 iosys_map_set_vaddr(&bo->vmap, virtual); in xe_bo_vmap()
2126 if (!iosys_map_is_null(&bo->vmap)) { in __xe_bo_vunmap()
2127 iosys_map_clear(&bo->vmap); in __xe_bo_vunmap()
2128 ttm_bo_kunmap(&bo->kmap); in __xe_bo_vunmap()
2150 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_create_ioctl()
2151 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
2152 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
2153 return -EINVAL; in xe_gem_create_ioctl()
2156 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
2157 !args->placement)) in xe_gem_create_ioctl()
2158 return -EINVAL; in xe_gem_create_ioctl()
2160 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
2164 return -EINVAL; in xe_gem_create_ioctl()
2166 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
2167 return -EINVAL; in xe_gem_create_ioctl()
2169 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
2170 return -EINVAL; in xe_gem_create_ioctl()
2172 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2173 return -EINVAL; in xe_gem_create_ioctl()
2175 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2176 return -EINVAL; in xe_gem_create_ioctl()
2179 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) in xe_gem_create_ioctl()
2182 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) in xe_gem_create_ioctl()
2185 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); in xe_gem_create_ioctl()
2190 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2191 IS_ALIGNED(args->size, SZ_64K)) in xe_gem_create_ioctl()
2194 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { in xe_gem_create_ioctl()
2196 return -EINVAL; in xe_gem_create_ioctl()
2201 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2202 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2203 return -EINVAL; in xe_gem_create_ioctl()
2206 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2207 return -EINVAL; in xe_gem_create_ioctl()
2210 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) in xe_gem_create_ioctl()
2211 return -EINVAL; in xe_gem_create_ioctl()
2213 if (args->vm_id) { in xe_gem_create_ioctl()
2214 vm = xe_vm_lookup(xef, args->vm_id); in xe_gem_create_ioctl()
2216 return -ENOENT; in xe_gem_create_ioctl()
2222 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2233 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); in xe_gem_create_ioctl()
2237 args->handle = handle; in xe_gem_create_ioctl()
2262 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2263 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2264 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2266 if (XE_IOCTL_DBG(xe, args->flags)) in xe_gem_mmap_offset_ioctl()
2267 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2269 gem_obj = drm_gem_object_lookup(file, args->handle); in xe_gem_mmap_offset_ioctl()
2271 return -ENOENT; in xe_gem_mmap_offset_ioctl()
2274 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); in xe_gem_mmap_offset_ioctl()
2281 * xe_bo_lock() - Lock the buffer object's dma_resv object
2288 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2295 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); in xe_bo_lock()
2297 dma_resv_lock(bo->ttm.base.resv, NULL); in xe_bo_lock()
2303 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
2310 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_unlock()
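A short usage sketch of the locking pair documented above, assuming an interruptible caller; the wrapper is hypothetical:

static int example_with_bo_locked(struct xe_bo *bo)
{
	int err;

	err = xe_bo_lock(bo, true);	/* interruptible, may fail with -EINTR */
	if (err)
		return err;

	/* ... access or validate the BO under its dma_resv lock ... */

	xe_bo_unlock(bo);
	return 0;
}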
2314 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
2332 if (bo->ttm.type == ttm_bo_type_kernel) in xe_bo_can_migrate()
2335 if (bo->ttm.type == ttm_bo_type_sg) in xe_bo_can_migrate()
2338 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2340 if (bo->placements[cur_place].mem_type == mem_type) in xe_bo_can_migrate()
2350 place->mem_type = mem_type; in xe_place_from_ttm_type()
2354 * xe_bo_migrate - Migrate an object to the desired region id
2366 * return -EINTR or -ERESTARTSYS if signal pending.
2370 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate()
2381 if (bo->ttm.resource->mem_type == mem_type) in xe_bo_migrate()
2385 return -EBUSY; in xe_bo_migrate()
2388 return -EINVAL; in xe_bo_migrate()
2398 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2403 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2406 return ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_migrate()
2410 * xe_bo_evict - Evict an object to its evict placement
2430 xe_evict_flags(&bo->ttm, &placement); in xe_bo_evict()
2431 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_evict()
2435 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict()
2442 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2455 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2461 * non-VRAM addresses. in xe_bo_needs_ccs_pages()
2463 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2470 * __xe_bo_release_dummy() - Dummy kref release function
2480 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2500 drm_gem_object_free(&bo->ttm.base.refcount); in xe_bo_put_commit()
2511 if (bo->client) in xe_bo_put()
2512 might_lock(&bo->client->bos_lock); in xe_bo_put()
2515 if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) in xe_bo_put()
2516 might_lock(&bo->ggtt_node[id]->ggtt->lock); in xe_bo_put()
2517 drm_gem_object_put(&bo->ttm.base); in xe_bo_put()
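A hedged sketch of the deferred-put pattern named in the kernel-doc above, assuming xe_bo_put_deferred()/xe_bo_put_commit() collect BOs on a struct llist_head (the fragments do not show the list type); the helper is invented:

static void example_put_many(struct xe_bo **bos, int count)
{
	LLIST_HEAD(deferred);
	int i;

	/* Queue the final puts instead of freeing in the caller's context. */
	for (i = 0; i < count; i++)
		xe_bo_put_deferred(bos[i], &deferred);

	/* Release everything once it is safe to take the required locks. */
	xe_bo_put_commit(&deferred);
}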
2522 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2538 int cpp = DIV_ROUND_UP(args->bpp, 8); in xe_bo_dumb_create()
2541 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2543 args->pitch = ALIGN(args->width * cpp, 64); in xe_bo_dumb_create()
2544 args->size = ALIGN(mul_u32_u32(args->pitch, args->height), in xe_bo_dumb_create()
2547 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2555 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); in xe_bo_dumb_create()
2556 /* drop reference from allocate - handle holds it now */ in xe_bo_dumb_create()
2557 drm_gem_object_put(&bo->ttm.base); in xe_bo_dumb_create()
2559 args->handle = handle; in xe_bo_dumb_create()
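A worked example of the pitch/size arithmetic above for an illustrative 1920x1080, 32 bpp framebuffer:

	/*
	 * cpp   = DIV_ROUND_UP(32, 8)       = 4 bytes per pixel
	 * pitch = ALIGN(1920 * 4, 64)       = 7680 bytes
	 * size  = ALIGN(7680 * 1080, SZ_4K) = 8294400 bytes (already 4 KiB aligned)
	 * On platforms with XE_VRAM_FLAGS_NEED64K the alignment is SZ_64K
	 * instead, giving ALIGN(8294400, SZ_64K) = 8323072 bytes.
	 */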
2565 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_runtime_pm_release_mmap_offset()
2566 struct ttm_device *bdev = tbo->bdev; in xe_bo_runtime_pm_release_mmap_offset()
2568 drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); in xe_bo_runtime_pm_release_mmap_offset()
2570 list_del_init(&bo->vram_userfault_link); in xe_bo_runtime_pm_release_mmap_offset()