Lines Matching +full:scatter +full:- +full:gather
1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-buf.h>
61 if (dev->driver->gem_create_object) { in __drm_gem_shmem_create()
62 obj = dev->driver->gem_create_object(dev, size); in __drm_gem_shmem_create()
69 return ERR_PTR(-ENOMEM); in __drm_gem_shmem_create()
70 obj = &shmem->base; in __drm_gem_shmem_create()
73 if (!obj->funcs) in __drm_gem_shmem_create()
74 obj->funcs = &drm_gem_shmem_funcs; in __drm_gem_shmem_create()
78 shmem->map_wc = false; /* dma-buf mappings always use writecombine */ in __drm_gem_shmem_create()
91 INIT_LIST_HEAD(&shmem->madv_list); in __drm_gem_shmem_create()
101 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | in __drm_gem_shmem_create()
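The branch above shows the allocation path: a driver's gem_create_object hook, if present, supplies the object so that struct drm_gem_shmem_object can be embedded in a driver-private type, and a NULL obj->funcs falls back to the default drm_gem_shmem_funcs. A minimal sketch of such a hook, with hypothetical mydrv_* names:

	#include <linux/slab.h>
	#include <drm/drm_gem_shmem_helper.h>

	struct mydrv_bo {
		struct drm_gem_shmem_object base;	/* shmem object embedded first */
		/* ... driver-private state ... */
	};

	static struct drm_gem_object *mydrv_gem_create_object(struct drm_device *dev,
							      size_t size)
	{
		struct mydrv_bo *bo;

		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		if (!bo)
			return ERR_PTR(-ENOMEM);

		/* Leaving bo->base.base.funcs NULL selects drm_gem_shmem_funcs;
		 * a driver table (see the funcs sketch further down) also works. */
		return &bo->base.base;
	}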
115 * drm_gem_shmem_create - Allocate an object with the given size
122 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
132 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
141 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
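A hedged usage sketch for drm_gem_shmem_create(): the helper page-aligns the requested size and hands back the object with one reference held, which the caller drops once it no longer needs its own pointer:

	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* ... use shmem->base, e.g. create a userspace handle ... */

	drm_gem_object_put(&shmem->base);	/* drop the allocation reference */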
153 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
161 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_free()
163 if (obj->import_attach) { in drm_gem_shmem_free()
164 drm_prime_gem_destroy(obj, shmem->sgt); in drm_gem_shmem_free()
166 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_free()
168 drm_WARN_ON(obj->dev, shmem->vmap_use_count); in drm_gem_shmem_free()
170 if (shmem->sgt) { in drm_gem_shmem_free()
171 dma_unmap_sgtable(obj->dev->dev, shmem->sgt, in drm_gem_shmem_free()
173 sg_free_table(shmem->sgt); in drm_gem_shmem_free()
174 kfree(shmem->sgt); in drm_gem_shmem_free()
176 if (shmem->pages) in drm_gem_shmem_free()
179 drm_WARN_ON(obj->dev, shmem->pages_use_count); in drm_gem_shmem_free()
181 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_free()
191 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_pages()
194 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_get_pages()
196 if (shmem->pages_use_count++ > 0) in drm_gem_shmem_get_pages()
201 drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n", in drm_gem_shmem_get_pages()
203 shmem->pages_use_count = 0; in drm_gem_shmem_get_pages()
213 if (shmem->map_wc) in drm_gem_shmem_get_pages()
214 set_pages_array_wc(pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_get_pages()
217 shmem->pages = pages; in drm_gem_shmem_get_pages()
223 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
230 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_put_pages()
232 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_put_pages()
234 if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) in drm_gem_shmem_put_pages()
237 if (--shmem->pages_use_count > 0) in drm_gem_shmem_put_pages()
241 if (shmem->map_wc) in drm_gem_shmem_put_pages()
242 set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_put_pages()
245 drm_gem_put_pages(obj, shmem->pages, in drm_gem_shmem_put_pages()
246 shmem->pages_mark_dirty_on_put, in drm_gem_shmem_put_pages()
247 shmem->pages_mark_accessed_on_put); in drm_gem_shmem_put_pages()
248 shmem->pages = NULL; in drm_gem_shmem_put_pages()
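The two page helpers pair one-to-one under the object's reservation lock; in the version shown, drm_gem_shmem_get_pages() is file-local, so outside callers take their references through pin or vmap instead. An illustrative sketch of the use-count discipline only:

	/* Illustration only: get_pages() is not driver API in this version. */
	dma_resv_lock(shmem->base.resv, NULL);

	ret = drm_gem_shmem_get_pages(shmem);	/* 0 -> 1 populates shmem->pages */
	if (!ret) {
		/* ... shmem->pages[] may be dereferenced here ... */
		drm_gem_shmem_put_pages(shmem);	/* 1 -> 0 releases the array */
	}

	dma_resv_unlock(shmem->base.resv);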
256 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_pin_locked()
258 drm_WARN_ON(shmem->base.dev, shmem->base.import_attach); in drm_gem_shmem_pin_locked()
268 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_unpin_locked()
275 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
286 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_pin()
289 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_pin()
291 ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); in drm_gem_shmem_pin()
295 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_pin()
302 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
310 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_unpin()
312 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_unpin()
314 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_unpin()
316 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_unpin()
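A hedged driver-side sketch for the pin helpers: both take the reservation lock internally, so they must be called unlocked, and pin keeps the backing pages resident for as long as hardware needs them (the mydrv_* names are hypothetical):

	static int mydrv_start_dma(struct drm_gem_shmem_object *shmem)
	{
		int ret;

		ret = drm_gem_shmem_pin(shmem);	/* interruptible lock; can fail */
		if (ret)
			return ret;

		/* ... point the hardware at the now-resident pages ... */
		return 0;
	}

	static void mydrv_dma_done(struct drm_gem_shmem_object *shmem)
	{
		drm_gem_shmem_unpin(shmem);	/* pages may be released again */
	}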
321 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
328 * between dma-buf imported and natively allocated objects.
338 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_vmap()
341 if (obj->import_attach) { in drm_gem_shmem_vmap()
342 ret = dma_buf_vmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vmap()
344 if (drm_WARN_ON(obj->dev, map->is_iomem)) { in drm_gem_shmem_vmap()
345 dma_buf_vunmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vmap()
346 return -EIO; in drm_gem_shmem_vmap()
352 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_vmap()
354 if (shmem->vmap_use_count++ > 0) { in drm_gem_shmem_vmap()
355 iosys_map_set_vaddr(map, shmem->vaddr); in drm_gem_shmem_vmap()
363 if (shmem->map_wc) in drm_gem_shmem_vmap()
365 shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, in drm_gem_shmem_vmap()
367 if (!shmem->vaddr) in drm_gem_shmem_vmap()
368 ret = -ENOMEM; in drm_gem_shmem_vmap()
370 iosys_map_set_vaddr(map, shmem->vaddr); in drm_gem_shmem_vmap()
374 drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret); in drm_gem_shmem_vmap()
381 if (!obj->import_attach) in drm_gem_shmem_vmap()
384 shmem->vmap_use_count = 0; in drm_gem_shmem_vmap()
391 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
399 * This function hides the differences between dma-buf imported and natively
405 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_vunmap()
407 if (obj->import_attach) { in drm_gem_shmem_vunmap()
408 dma_buf_vunmap(obj->import_attach->dmabuf, map); in drm_gem_shmem_vunmap()
410 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_vunmap()
412 if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) in drm_gem_shmem_vunmap()
415 if (--shmem->vmap_use_count > 0) in drm_gem_shmem_vunmap()
418 vunmap(shmem->vaddr); in drm_gem_shmem_vunmap()
422 shmem->vaddr = NULL; in drm_gem_shmem_vunmap()
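A hedged CPU-access sketch pairing the two helpers through struct iosys_map; for natively allocated objects both calls assert the reservation lock, so the caller takes it explicitly (mydrv_clear_bo is hypothetical):

	#include <linux/iosys-map.h>

	static int mydrv_clear_bo(struct drm_gem_shmem_object *shmem)
	{
		struct iosys_map map;
		int ret;

		dma_resv_lock(shmem->base.resv, NULL);

		ret = drm_gem_shmem_vmap(shmem, &map);
		if (!ret) {
			iosys_map_memset(&map, 0, 0, shmem->base.size);	/* zero-fill */
			drm_gem_shmem_vunmap(shmem, &map);
		}

		dma_resv_unlock(shmem->base.resv);
		return ret;
	}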
442 ret = drm_gem_handle_create(file_priv, &shmem->base, handle); in drm_gem_shmem_create_with_handle()
443 /* drop reference from allocate - handle holds it now. */ in drm_gem_shmem_create_with_handle()
444 drm_gem_object_put(&shmem->base); in drm_gem_shmem_create_with_handle()
450 * false or -errno.
454 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_madvise()
456 if (shmem->madv >= 0) in drm_gem_shmem_madvise()
457 shmem->madv = madv; in drm_gem_shmem_madvise()
459 madv = shmem->madv; in drm_gem_shmem_madvise()
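The usual caller is a madvise-style ioctl, as in panfrost-like drivers: userspace flags a buffer DONTNEED or WILLNEED and is told whether the pages were still retained. A hedged sketch with a hypothetical ioctl payload args:

	dma_resv_lock(shmem->base.resv, NULL);
	args->retained = drm_gem_shmem_madvise(shmem, args->madv);
	dma_resv_unlock(shmem->base.resv);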
467 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_purge()
468 struct drm_device *dev = obj->dev; in drm_gem_shmem_purge()
470 dma_resv_assert_held(shmem->base.resv); in drm_gem_shmem_purge()
472 drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); in drm_gem_shmem_purge()
474 dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); in drm_gem_shmem_purge()
475 sg_free_table(shmem->sgt); in drm_gem_shmem_purge()
476 kfree(shmem->sgt); in drm_gem_shmem_purge()
477 shmem->sgt = NULL; in drm_gem_shmem_purge()
481 shmem->madv = -1; in drm_gem_shmem_purge()
483 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); in drm_gem_shmem_purge()
491 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); in drm_gem_shmem_purge()
493 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); in drm_gem_shmem_purge()
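Purging is typically driven from a shrinker, which must not sleep on contended locks: trylock the reservation, reclaim only objects that are still marked purgeable, and move on. A minimal sketch:

	if (dma_resv_trylock(shmem->base.resv)) {
		if (drm_gem_shmem_is_purgeable(shmem))
			drm_gem_shmem_purge(shmem);	/* drops pages, madv = -1 */
		dma_resv_unlock(shmem->base.resv);
	}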
498 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
517 u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); in drm_gem_shmem_dumb_create()
519 if (!args->pitch || !args->size) { in drm_gem_shmem_dumb_create()
520 args->pitch = min_pitch; in drm_gem_shmem_dumb_create()
521 args->size = PAGE_ALIGN(args->pitch * args->height); in drm_gem_shmem_dumb_create()
524 if (args->pitch < min_pitch) in drm_gem_shmem_dumb_create()
525 args->pitch = min_pitch; in drm_gem_shmem_dumb_create()
526 if (args->size < args->pitch * args->height) in drm_gem_shmem_dumb_create()
527 args->size = PAGE_ALIGN(args->pitch * args->height); in drm_gem_shmem_dumb_create()
530 return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); in drm_gem_shmem_dumb_create()
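Worked example of the pitch math above: a 1024x768 buffer at 32 bpp gets min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes and size = PAGE_ALIGN(4096 * 768) = 3 MiB. Drivers normally wire this helper, together with the PRIME import further down, through the convenience macro from the header; a hedged sketch:

	static const struct drm_driver mydrv_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		DRM_GEM_SHMEM_DRIVER_OPS,	/* .dumb_create + .gem_prime_import_sg_table */
		/* ... name, fops, ioctls ... */
	};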
536 struct vm_area_struct *vma = vmf->vma; in drm_gem_shmem_fault()
537 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_fault()
539 loff_t num_pages = obj->size >> PAGE_SHIFT; in drm_gem_shmem_fault()
544 /* We don't use vmf->pgoff since that has the fake offset */ in drm_gem_shmem_fault()
545 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in drm_gem_shmem_fault()
547 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_fault()
550 drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || in drm_gem_shmem_fault()
551 shmem->madv < 0) { in drm_gem_shmem_fault()
554 page = shmem->pages[page_offset]; in drm_gem_shmem_fault()
556 ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); in drm_gem_shmem_fault()
559 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_fault()
566 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_vm_open()
569 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_vm_open()
571 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_vm_open()
578 if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) in drm_gem_shmem_vm_open()
579 shmem->pages_use_count++; in drm_gem_shmem_vm_open()
581 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_vm_open()
588 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_shmem_vm_close()
591 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_vm_close()
593 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_vm_close()
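The three handlers above are tied together by the file's exported vm_operations_struct, which driver funcs tables reference as .vm_ops; reconstructed here for context, since the table itself contains no search match:

	const struct vm_operations_struct drm_gem_shmem_vm_ops = {
		.fault = drm_gem_shmem_fault,
		.open = drm_gem_shmem_vm_open,
		.close = drm_gem_shmem_vm_close,
	};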
606 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
618 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_mmap()
621 if (obj->import_attach) { in drm_gem_shmem_mmap()
623 * vm_ops pointing to our implementation if the dma-buf backend in drm_gem_shmem_mmap()
626 vma->vm_private_data = NULL; in drm_gem_shmem_mmap()
627 vma->vm_ops = NULL; in drm_gem_shmem_mmap()
629 ret = dma_buf_mmap(obj->dma_buf, vma, 0); in drm_gem_shmem_mmap()
638 if (is_cow_mapping(vma->vm_flags)) in drm_gem_shmem_mmap()
639 return -EINVAL; in drm_gem_shmem_mmap()
641 dma_resv_lock(shmem->base.resv, NULL); in drm_gem_shmem_mmap()
643 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_mmap()
649 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in drm_gem_shmem_mmap()
650 if (shmem->map_wc) in drm_gem_shmem_mmap()
651 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in drm_gem_shmem_mmap()
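A driver that wants stock shmem behavior can route every drm_gem_object_funcs callback to the wrappers from <drm/drm_gem_shmem_helper.h>, mirroring the default drm_gem_shmem_funcs defined near the top of this file; a hedged sketch:

	static const struct drm_gem_object_funcs mydrv_gem_funcs = {
		.free		= drm_gem_shmem_object_free,
		.print_info	= drm_gem_shmem_object_print_info,
		.pin		= drm_gem_shmem_object_pin,
		.unpin		= drm_gem_shmem_object_unpin,
		.get_sg_table	= drm_gem_shmem_object_get_sg_table,
		.vmap		= drm_gem_shmem_object_vmap,
		.vunmap		= drm_gem_shmem_object_vunmap,
		.mmap		= drm_gem_shmem_object_mmap,
		.vm_ops		= &drm_gem_shmem_vm_ops,
	};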
658 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
666 if (shmem->base.import_attach) in drm_gem_shmem_print_info()
669 drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); in drm_gem_shmem_print_info()
670 drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); in drm_gem_shmem_print_info()
671 drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); in drm_gem_shmem_print_info()
676 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
680 * This function exports a scatter/gather table suitable for PRIME usage by
683 * Drivers that need to acquire a scatter/gather table for objects need to call in drm_gem_shmem_get_sg_table()
687 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
691 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_sg_table()
693 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_get_sg_table()
695 return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); in drm_gem_shmem_get_sg_table()
701 struct drm_gem_object *obj = &shmem->base; in drm_gem_shmem_get_pages_sgt_locked()
705 if (shmem->sgt) in drm_gem_shmem_get_pages_sgt_locked()
706 return shmem->sgt; in drm_gem_shmem_get_pages_sgt_locked()
708 drm_WARN_ON(obj->dev, obj->import_attach); in drm_gem_shmem_get_pages_sgt_locked()
720 ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0); in drm_gem_shmem_get_pages_sgt_locked()
724 shmem->sgt = sgt; in drm_gem_shmem_get_pages_sgt_locked()
737 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
738 * scatter/gather table for a shmem GEM object.
741 * This function returns a scatter/gather table suitable for driver usage. If
742 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg in drm_gem_shmem_get_pages_sgt()
746 * any difference between dma-buf imported and natively allocated objects. in drm_gem_shmem_get_pages_sgt()
750 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded error on failure. in drm_gem_shmem_get_pages_sgt()
757 ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); in drm_gem_shmem_get_pages_sgt()
761 dma_resv_unlock(shmem->base.resv); in drm_gem_shmem_get_pages_sgt()
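A hedged sketch of the intended driver flow: the first call pins the pages, DMA-maps them, and caches the result in shmem->sgt; repeat calls return the cached table. mydrv_map_for_hw is hypothetical:

	static int mydrv_map_for_hw(struct drm_gem_shmem_object *shmem)
	{
		struct sg_table *sgt;
		struct scatterlist *sg;
		unsigned int i;

		sgt = drm_gem_shmem_get_pages_sgt(shmem);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		for_each_sgtable_dma_sg(sgt, sg, i) {
			/* feed sg_dma_address(sg) / sg_dma_len(sg) to the MMU */
		}
		return 0;
	}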
768 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
769 * another driver's scatter/gather table of pinned pages
771 * @attach: DMA-BUF attachment
772 * @sgt: Scatter/gather table of pinned pages
774 * This function imports a scatter/gather table exported via DMA-BUF by
779 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative in drm_gem_shmem_prime_import_sg_table()
787 size_t size = PAGE_ALIGN(attach->dmabuf->size); in drm_gem_shmem_prime_import_sg_table()
794 shmem->sgt = sgt; in drm_gem_shmem_prime_import_sg_table()
798 return &shmem->base; in drm_gem_shmem_prime_import_sg_table()
802 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");