Lines Matching +full:pre +full:- +full:blending

30 #include <linux/dma-mapping.h>
52 * NV10-NV40 tiling helpers
60 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
62 struct nvkm_fb_tile *tile = &fb->tile.region[i]; in nv10_bo_update_tile_region()
64 nouveau_fence_unref(&reg->fence); in nv10_bo_update_tile_region()
66 if (tile->pitch) in nv10_bo_update_tile_region()
79 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region()
81 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
83 if (!tile->used && in nv10_bo_get_tile_region()
84 (!tile->fence || nouveau_fence_done(tile->fence))) in nv10_bo_get_tile_region()
85 tile->used = true; in nv10_bo_get_tile_region()
89 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
100 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
101 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); in nv10_bo_put_tile_region()
102 tile->used = false; in nv10_bo_put_tile_region()
103 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
116 for (i = 0; i < fb->tile.regions; i++) { in nv10_bo_set_tiling()
123 } else if (tile && fb->tile.region[i].pitch) { in nv10_bo_set_tiling()
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm()
140 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
143 WARN_ON(nvbo->bo.pin_count > 0); in nouveau_bo_del_ttm()
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
147 if (bo->base.import_attach) in nouveau_bo_del_ttm()
148 drm_prime_gem_destroy(&bo->base, bo->sg); in nouveau_bo_del_ttm()
154 if (bo->base.dev) { in nouveau_bo_del_ttm()
158 if (nvbo->no_share) in nouveau_bo_del_ttm()
159 drm_gem_object_put(nvbo->r_obj); in nouveau_bo_del_ttm()
161 drm_gem_object_release(&bo->base); in nouveau_bo_del_ttm()
163 dma_resv_fini(&bo->base._resv); in nouveau_bo_del_ttm()
172 x += y - 1; in roundup_64()
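
Only the first statement of roundup_64() is matched above. A minimal sketch of the round-up helper it belongs to, assuming the usual kernel idiom (do_div() because the value is 64-bit and the multiple is not necessarily a power of two):

	static inline u64
	roundup_64(u64 x, u32 y)
	{
		/* round x up to the next multiple of y; do_div() divides the
		 * 64-bit value in place, so this also works on 32-bit hosts */
		x += y - 1;
		do_div(x, y);
		return x * y;
	}
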
180 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
181 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
183 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_fixup_align()
184 if (nvbo->mode) { in nouveau_bo_fixup_align()
185 if (device->info.chipset >= 0x40) { in nouveau_bo_fixup_align()
187 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
189 } else if (device->info.chipset >= 0x30) { in nouveau_bo_fixup_align()
191 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
193 } else if (device->info.chipset >= 0x20) { in nouveau_bo_fixup_align()
195 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
197 } else if (device->info.chipset >= 0x10) { in nouveau_bo_fixup_align()
199 *size = roundup_64(*size, 32 * nvbo->mode); in nouveau_bo_fixup_align()
203 *size = roundup_64(*size, (1 << nvbo->page)); in nouveau_bo_fixup_align()
204 *align = max((1 << nvbo->page), *align); in nouveau_bo_fixup_align()
214 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc()
216 struct nvif_mmu *mmu = &cli->mmu; in nouveau_bo_alloc()
217 struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm; in nouveau_bo_alloc()
218 int i, pi = -1; in nouveau_bo_alloc()
222 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
227 return ERR_PTR(-ENOMEM); in nouveau_bo_alloc()
229 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_alloc()
230 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_alloc()
231 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_alloc()
232 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
239 /* Determine if we can get a cache-coherent map, forcing in nouveau_bo_alloc()
243 nvbo->force_coherent = true; in nouveau_bo_alloc()
246 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); in nouveau_bo_alloc()
248 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { in nouveau_bo_alloc()
249 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; in nouveau_bo_alloc()
250 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
252 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
255 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; in nouveau_bo_alloc()
256 } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_alloc()
257 nvbo->kind = (tile_flags & 0x00007f00) >> 8; in nouveau_bo_alloc()
258 nvbo->comp = (tile_flags & 0x00030000) >> 16; in nouveau_bo_alloc()
259 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
261 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
264 nvbo->zeta = (tile_flags & 0x00000007); in nouveau_bo_alloc()
266 nvbo->mode = tile_mode; in nouveau_bo_alloc()
270 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
273 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
278 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && in nouveau_bo_alloc()
279 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
282 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
289 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) in nouveau_bo_alloc()
293 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
299 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
303 if (nvbo->comp && !vmm->page[pi].comp) { in nouveau_bo_alloc()
304 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_bo_alloc()
305 nvbo->kind = mmu->kind[nvbo->kind]; in nouveau_bo_alloc()
306 nvbo->comp = 0; in nouveau_bo_alloc()
308 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
311 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
314 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
319 if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
322 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
329 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
334 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
336 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
357 INIT_LIST_HEAD(&nvbo->io_reserve_lru); in nouveau_bo_init()
359 ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type, in nouveau_bo_init()
360 &nvbo->placement, align >> PAGE_SHIFT, &ctx, in nouveau_bo_init()
368 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_init()
387 nvbo->bo.base.size = size; in nouveau_bo_new()
388 dma_resv_init(&nvbo->bo.base._resv); in nouveau_bo_new()
389 drm_vma_node_reset(&nvbo->bo.base.vma_node); in nouveau_bo_new()
394 drm_gem_gpuva_init(&nvbo->bo.base); in nouveau_bo_new()
407 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
408 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
411 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
412 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && in set_placement_range()
413 nvbo->bo.base.size < vram_size / 4) { in set_placement_range()
417 * speed up when alpha-blending and depth-test are enabled in set_placement_range()
420 if (nvbo->zeta) { in set_placement_range()
427 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
428 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
429 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
438 unsigned int *n = &nvbo->placement.num_placement; in nouveau_bo_placement_set()
439 struct ttm_place *pl = nvbo->placements; in nouveau_bo_placement_set()
463 nvbo->placement.placement = nvbo->placements; in nouveau_bo_placement_set()
469 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin_locked()
470 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin_locked()
474 dma_resv_assert_held(bo->base.resv); in nouveau_bo_pin_locked()
476 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin_locked()
478 if (!nvbo->contig) { in nouveau_bo_pin_locked()
479 nvbo->contig = true; in nouveau_bo_pin_locked()
485 if (nvbo->bo.pin_count) { in nouveau_bo_pin_locked()
488 switch (bo->resource->mem_type) { in nouveau_bo_pin_locked()
502 bo->resource->mem_type, domain); in nouveau_bo_pin_locked()
503 ret = -EBUSY; in nouveau_bo_pin_locked()
505 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin_locked()
521 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin_locked()
523 switch (bo->resource->mem_type) { in nouveau_bo_pin_locked()
525 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin_locked()
528 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin_locked()
536 nvbo->contig = false; in nouveau_bo_pin_locked()
542 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin_locked()
543 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin_locked()
545 dma_resv_assert_held(bo->base.resv); in nouveau_bo_unpin_locked()
547 ttm_bo_unpin(&nvbo->bo); in nouveau_bo_unpin_locked()
548 if (!nvbo->bo.pin_count) { in nouveau_bo_unpin_locked()
549 switch (bo->resource->mem_type) { in nouveau_bo_unpin_locked()
551 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin_locked()
554 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin_locked()
564 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
578 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
595 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_bo_map()
599 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); in nouveau_bo_map()
601 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
611 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
617 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
618 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
621 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_device()
623 if (!ttm_dma->pages) { in nouveau_bo_sync_for_device()
629 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
633 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_device()
634 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_device()
637 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_device()
638 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_device()
643 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
644 ttm_dma->dma_address[i], in nouveau_bo_sync_for_device()
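
The sync loop above batches physically contiguous pages into a single call. A hedged sketch of the full pattern, assuming the usual shape of this loop (the num_pages accounting and the DMA direction are not part of the matched lines):

	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		/* extend the run while the next struct page is adjacent,
		 * i.e. the pages are physically contiguous */
		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;
			++num_pages;
		}

		/* one sync call covers the whole contiguous run; the CPU has
		 * written and the device will read, hence DMA_TO_DEVICE */
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE,
					   DMA_TO_DEVICE);
		i += num_pages;
	}

nouveau_bo_sync_for_cpu() below mirrors this pattern with dma_sync_single_for_cpu() and, presumably, DMA_FROM_DEVICE.
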
653 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
654 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
657 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_cpu()
659 if (!ttm_dma->pages) { in nouveau_bo_sync_for_cpu()
665 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
669 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_cpu()
670 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_cpu()
673 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_cpu()
674 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_cpu()
680 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
688 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru()
691 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
692 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
693 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
698 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru()
701 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
702 list_del_init(&nvbo->io_reserve_lru); in nouveau_bo_del_io_reserve_lru()
703 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
713 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); in nouveau_bo_validate()
726 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
740 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
754 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
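
ttm_kmap_obj_virtual() also reports whether the kmap points at I/O memory. A hedged sketch of how the 32-bit write accessor is expected to use that flag (the 16-bit and read variants follow the same shape):

	void
	nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned int index, u32 val)
	{
		bool is_iomem;
		u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

		mem += index;

		/* mappings of VRAM must go through the MMIO accessors;
		 * system-memory mappings can be stored to directly */
		if (is_iomem)
			iowrite32_native(val, (void __force __iomem *)mem);
		else
			*mem = val;
	}
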
768 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create()
770 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
771 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
786 return -EINVAL; in nouveau_ttm_tt_bind()
788 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
800 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
813 switch (bo->resource->mem_type) { in nouveau_bo_evict_flags()
823 *pl = nvbo->placement; in nouveau_bo_evict_flags()
830 struct nouveau_mem *old_mem = nouveau_mem(bo->resource); in nouveau_bo_move_prep()
832 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
835 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, in nouveau_bo_move_prep()
836 old_mem->mem.size, &old_mem->vma[0]); in nouveau_bo_move_prep()
840 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, in nouveau_bo_move_prep()
841 new_mem->mem.size, &old_mem->vma[1]); in nouveau_bo_move_prep()
845 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
849 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
852 nvif_vmm_put(vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
853 nvif_vmm_put(vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
863 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf()
864 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
865 struct nouveau_cli *cli = chan->cli; in nouveau_bo_move_m2mf()
873 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
879 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
880 mutex_lock(&cli->mutex); in nouveau_bo_move_m2mf()
882 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); in nouveau_bo_move_m2mf()
884 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); in nouveau_bo_move_m2mf()
888 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
905 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, in nouveau_bo_move_m2mf()
910 mutex_unlock(&cli->mutex); in nouveau_bo_move_m2mf()
958 if (mthd->engine) in nouveau_bo_move_init()
959 chan = drm->cechan; in nouveau_bo_move_init()
961 chan = drm->channel; in nouveau_bo_move_init()
965 ret = nvif_object_ctor(&chan->user, "ttmBoMove", in nouveau_bo_move_init()
966 mthd->oclass | (mthd->engine << 16), in nouveau_bo_move_init()
967 mthd->oclass, NULL, 0, in nouveau_bo_move_init()
968 &drm->ttm.copy); in nouveau_bo_move_init()
970 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
972 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
976 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
977 drm->ttm.chan = chan; in nouveau_bo_move_init()
978 name = mthd->name; in nouveau_bo_move_init()
981 } while ((++mthd)->exec); in nouveau_bo_move_init()
995 if (bo->destroy != nouveau_bo_del_ttm) in nouveau_bo_move_ntfy()
1000 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && in nouveau_bo_move_ntfy()
1001 mem->mem.page == nvbo->page) { in nouveau_bo_move_ntfy()
1002 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1007 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1008 ret = dma_resv_wait_timeout(bo->base.resv, in nouveau_bo_move_ntfy()
1018 nvbo->offset = (new_reg->start << PAGE_SHIFT); in nouveau_bo_move_ntfy()
1026 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind()
1027 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
1029 u64 offset = new_reg->start << PAGE_SHIFT; in nouveau_bo_vm_bind()
1032 if (new_reg->mem_type != TTM_PL_VRAM) in nouveau_bo_vm_bind()
1035 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
1036 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, in nouveau_bo_vm_bind()
1037 nvbo->mode, nvbo->zeta); in nouveau_bo_vm_bind()
1048 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup()
1049 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
1053 ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1056 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1069 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move()
1071 struct drm_gem_object *obj = &bo->base; in nouveau_bo_move()
1072 struct ttm_resource *old_reg = bo->resource; in nouveau_bo_move()
1076 if (new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1077 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); in nouveau_bo_move()
1088 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1095 if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1096 !bo->ttm)) { in nouveau_bo_move()
1101 if (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1102 new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1107 if (old_reg->mem_type == TTM_PL_TT && in nouveau_bo_move()
1108 new_reg->mem_type == TTM_PL_SYSTEM) { in nouveau_bo_move()
1109 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); in nouveau_bo_move()
1110 ttm_resource_free(bo, &bo->resource); in nouveau_bo_move()
1116 if (drm->ttm.move) { in nouveau_bo_move()
1117 if ((old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1118 new_reg->mem_type == TTM_PL_VRAM) || in nouveau_bo_move()
1119 (old_reg->mem_type == TTM_PL_VRAM && in nouveau_bo_move()
1120 new_reg->mem_type == TTM_PL_SYSTEM)) { in nouveau_bo_move()
1121 hop->fpfn = 0; in nouveau_bo_move()
1122 hop->lpfn = 0; in nouveau_bo_move()
1123 hop->mem_type = TTM_PL_TT; in nouveau_bo_move()
1124 hop->flags = 0; in nouveau_bo_move()
1125 return -EMULTIHOP; in nouveau_bo_move()
1130 ret = -ENODEV; in nouveau_bo_move()
1138 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1142 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1146 nouveau_bo_move_ntfy(bo, bo->resource); in nouveau_bo_move()
1158 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1159 switch (reg->mem_type) { in nouveau_ttm_io_mem_free_locked()
1161 if (mem->kind) in nouveau_ttm_io_mem_free_locked()
1162 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1165 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1179 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1182 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1184 switch (reg->mem_type) { in nouveau_ttm_io_mem_reserve()
1191 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1192 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1193 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1194 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1195 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1198 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1199 !mem->kind) { in nouveau_ttm_io_mem_reserve()
1206 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1207 device->func->resource_addr(device, 1); in nouveau_ttm_io_mem_reserve()
1208 reg->bus.is_iomem = true; in nouveau_ttm_io_mem_reserve()
1211 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1212 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1213 reg->bus.caching = ttm_uncached; in nouveau_ttm_io_mem_reserve()
1215 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1217 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1225 switch (mem->mem.object.oclass) { in nouveau_ttm_io_mem_reserve()
1229 args.nv50.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1230 args.nv50.comp = mem->comp; in nouveau_ttm_io_mem_reserve()
1236 args.gf100.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1244 ret = nvif_object_map_handle(&mem->mem.object, in nouveau_ttm_io_mem_reserve()
1249 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1253 reg->bus.offset = handle; in nouveau_ttm_io_mem_reserve()
1258 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1262 if (ret == -ENOSPC) { in nouveau_ttm_io_mem_reserve()
1265 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1269 list_del_init(&nvbo->io_reserve_lru); in nouveau_ttm_io_mem_reserve()
1270 drm_vma_node_unmap(&nvbo->bo.base.vma_node, in nouveau_ttm_io_mem_reserve()
1271 bdev->dev_mapping); in nouveau_ttm_io_mem_reserve()
1272 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1273 nvbo->bo.resource->bus.offset = 0; in nouveau_ttm_io_mem_reserve()
1274 nvbo->bo.resource->bus.addr = NULL; in nouveau_ttm_io_mem_reserve()
1279 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1288 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1290 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1295 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify()
1298 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; in nouveau_ttm_fault_reserve_notify()
1304 if (bo->resource->mem_type != TTM_PL_VRAM) { in nouveau_ttm_fault_reserve_notify()
1305 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1306 !nvbo->kind) in nouveau_ttm_fault_reserve_notify()
1309 if (bo->resource->mem_type != TTM_PL_SYSTEM) in nouveau_ttm_fault_reserve_notify()
1316 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1317 bo->resource->start + PFN_UP(bo->resource->size) < mappable) in nouveau_ttm_fault_reserve_notify()
1320 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1321 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1322 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1329 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) in nouveau_ttm_fault_reserve_notify()
1344 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_populate()
1349 if (slave && ttm->sg) { in nouveau_ttm_tt_populate()
1350 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, in nouveau_ttm_tt_populate()
1351 ttm->num_pages); in nouveau_ttm_tt_populate()
1357 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1365 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_unpopulate()
1374 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
1383 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()
1394 struct dma_resv *resv = nvbo->bo.base.resv; in nouveau_bo_fence()
1399 dma_resv_add_fence(resv, &fence->base, exclusive ? in nouveau_bo_fence()
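
The match cuts the ternary short; a hedged sketch of how the exclusive flag is expected to map onto dma-resv usage:

	/* an exclusive (write) fence must be waited on by readers and
	 * writers alike; a shared fence only gates subsequent writers */
	dma_resv_add_fence(resv, &fence->base, exclusive ?
			   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
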