Lines matching "non-continuous" (sound/core/memalloc.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
11 #include <linux/dma-mapping.h>
12 #include <linux/dma-map-ops.h>
34 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
35 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
43 if (WARN_ON_ONCE(!ops || !ops->alloc)) in __snd_dma_alloc_pages()
45 return ops->alloc(dmab, size); in __snd_dma_alloc_pages()
49 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
57 * Calls the memory-allocator function for the corresponding
68 return -ENXIO; in snd_dma_alloc_dir_pages()
70 return -ENXIO; in snd_dma_alloc_dir_pages()
73 dmab->dev.type = type; in snd_dma_alloc_dir_pages()
74 dmab->dev.dev = device; in snd_dma_alloc_dir_pages()
75 dmab->dev.dir = dir; in snd_dma_alloc_dir_pages()
76 dmab->bytes = 0; in snd_dma_alloc_dir_pages()
77 dmab->addr = 0; in snd_dma_alloc_dir_pages()
78 dmab->private_data = NULL; in snd_dma_alloc_dir_pages()
79 dmab->area = __snd_dma_alloc_pages(dmab, size); in snd_dma_alloc_dir_pages()
80 if (!dmab->area) in snd_dma_alloc_dir_pages()
81 return -ENOMEM; in snd_dma_alloc_dir_pages()
82 dmab->bytes = size; in snd_dma_alloc_dir_pages()
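
A minimal usage sketch for the allocator entry point above. The calls and struct fields are the real ones from this file and <sound/memalloc.h>; the helper name, the device pointer and the 64 KiB size are illustrative assumptions.

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* hypothetical helper: allocate, touch and release a coherent DMA buffer */
static int example_alloc_release(struct device *dev)
{
	struct snd_dma_buffer dmab;
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
				      DMA_BIDIRECTIONAL, 64 * 1024, &dmab);
	if (err < 0)
		return err;	/* -ENXIO on bad arguments, -ENOMEM on failure */

	/* dmab.area is the CPU mapping, dmab.addr the DMA address,
	 * dmab.bytes the size that was actually allocated
	 */
	memset(dmab.area, 0, dmab.bytes);

	snd_dma_free_pages(&dmab);	/* dispatches to the matching ops->free */
	return 0;
}
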
88 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
94 * Calls the memory-allocator function for the corresponding
108 if (err != -ENOMEM) in snd_dma_alloc_pages_fallback()
111 return -ENOMEM; in snd_dma_alloc_pages_fallback()
115 if (!dmab->area) in snd_dma_alloc_pages_fallback()
116 return -ENOMEM; in snd_dma_alloc_pages_fallback()
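
The fallback variant above keeps retrying with smaller sizes on -ENOMEM, so the caller has to read dmab->bytes to learn how much it actually got. A hedged sketch; the 1 MiB request and helper name are illustrative.

#include <sound/memalloc.h>
#include <linux/device.h>

/* hypothetical helper: grab as much DMA memory as currently available */
static int example_fallback_alloc(struct device *dev,
				  struct snd_dma_buffer *dmab)
{
	int err;

	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					   1024 * 1024, dmab);
	if (err < 0)
		return err;

	dev_info(dev, "allocated %zu bytes at %pad\n",
		 dmab->bytes, &dmab->addr);
	return 0;
}
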
122 * snd_dma_free_pages - release the allocated buffer
131 if (ops && ops->free) in snd_dma_free_pages()
132 ops->free(dmab); in snd_dma_free_pages()
143 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
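
A probe-time sketch of the devres-managed variant documented above: the returned buffer is released automatically when the device is unbound, so no explicit snd_dma_free_pages() call is needed. The surrounding driver context is assumed.

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>

/* hypothetical probe helper */
static int example_devm_buffer(struct device *dev)
{
	struct snd_dma_buffer *dmab;

	dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
					DMA_BIDIRECTIONAL, 128 * 1024);
	if (!dmab)
		return -ENOMEM;

	/* the buffer stays valid until driver detach; devres frees it */
	return 0;
}
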
185 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
197 return -ENOENT; in snd_dma_buffer_mmap()
199 if (ops && ops->mmap) in snd_dma_buffer_mmap()
200 return ops->mmap(dmab, area); in snd_dma_buffer_mmap()
202 return -ENOENT; in snd_dma_buffer_mmap()
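
Drivers rarely call snd_dma_buffer_mmap() themselves; the default PCM mmap path ends up here. A minimal sketch of an explicit PCM .mmap op that delegates to it, assuming the substream's buffer was set up through this allocator:

#include <sound/pcm.h>

/* hypothetical PCM .mmap callback */
static int example_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *area)
{
	/* snd_pcm_get_dma_buf() returns the snd_dma_buffer bound to the
	 * runtime; snd_dma_buffer_mmap() dispatches to ops->mmap above
	 */
	return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
}
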
208 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
217 if (!dmab || !dmab->dev.need_sync) in snd_dma_buffer_sync()
220 if (ops && ops->sync) in snd_dma_buffer_sync()
221 ops->sync(dmab, mode); in snd_dma_buffer_sync()
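
For buffer types that set dmab->dev.need_sync (the non-contiguous and non-coherent allocators further down), CPU access has to be bracketed by sync calls; for all other types the function above returns immediately, so calling it unconditionally is harmless. A sketch for reading captured data; the helper name is an assumption.

#include <sound/memalloc.h>
#include <linux/string.h>

/* hypothetical helper: copy freshly captured samples out of the buffer */
static void example_read_captured(struct snd_dma_buffer *dmab,
				  void *dst, size_t bytes)
{
	/* make device writes visible to the CPU before reading */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
	memcpy(dst, dmab->area, bytes);
	/* hand the buffer back to the device for further DMA */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
}
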
227 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
237 if (ops && ops->get_addr) in snd_sgbuf_get_addr()
238 return ops->get_addr(dmab, offset); in snd_sgbuf_get_addr()
240 return dmab->addr + offset; in snd_sgbuf_get_addr()
245 * snd_sgbuf_get_page - return the physical page at the corresponding offset
255 if (ops && ops->get_page) in snd_sgbuf_get_page()
256 return ops->get_page(dmab, offset); in snd_sgbuf_get_page()
258 return virt_to_page(dmab->area + offset); in snd_sgbuf_get_page()
263 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
264 * on sg-buffer
276 if (ops && ops->get_chunk_size) in snd_sgbuf_get_chunk_size()
277 return ops->get_chunk_size(dmab, ofs, size); in snd_sgbuf_get_chunk_size()
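
A sketch of how a driver might program a hardware scatter list with the helpers above: snd_sgbuf_get_chunk_size() returns the longest run that stays contiguous from the given offset (or the whole remaining size on non-SG buffers), and snd_sgbuf_get_addr() supplies its DMA address. hw_write_sg_entry() is a hypothetical stand-in for real descriptor programming.

#include <sound/memalloc.h>
#include <linux/types.h>

/* hypothetical stand-in for writing one hardware SG descriptor */
static void hw_write_sg_entry(dma_addr_t addr, unsigned int len)
{
}

static void example_build_sgl(struct snd_dma_buffer *dmab, unsigned int bytes)
{
	unsigned int ofs = 0;

	while (ofs < bytes) {
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
							      bytes - ofs);

		hw_write_sg_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
		ofs += chunk;
	}
}
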
284 * Continuous pages allocator
299 if ((*addr + size - 1) & ~dev->coherent_dma_mask) { in do_alloc_pages()
328 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false); in snd_dma_continuous_alloc()
333 do_free_pages(dmab->area, dmab->bytes, false); in snd_dma_continuous_free()
339 return remap_pfn_range(area, area->vm_start, in snd_dma_continuous_mmap()
340 dmab->addr >> PAGE_SHIFT, in snd_dma_continuous_mmap()
341 area->vm_end - area->vm_start, in snd_dma_continuous_mmap()
342 area->vm_page_prot); in snd_dma_continuous_mmap()
361 vfree(dmab->area); in snd_dma_vmalloc_free()
367 return remap_vmalloc_range(area, dmab->area, 0); in snd_dma_vmalloc_mmap()
371 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
382 return vmalloc_to_page(dmab->area + offset); in snd_dma_vmalloc_get_page()
393 end = ofs + size - 1; /* the last byte address */ in snd_dma_vmalloc_get_chunk_size()
402 return start - ofs; in snd_dma_vmalloc_get_chunk_size()
404 /* ok, all on continuous pages */ in snd_dma_vmalloc_get_chunk_size()
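
The vmalloc-backed type is meant for drivers that only ever touch the buffer with the CPU, and the alloc path above never dereferences a device. A hedged sketch (assuming a NULL device is acceptable for this type, as common in-tree usage suggests):

#include <sound/memalloc.h>

/* hypothetical helper: CPU-only buffer, e.g. for drivers that copy samples */
static int example_vmalloc_buffer(struct snd_dma_buffer *dmab, size_t size)
{
	/* dmab->addr stays 0 here; only dmab->area is meaningful */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC, NULL, size, dmab);
}
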
424 struct device *dev = dmab->dev.dev; in snd_dma_iram_alloc()
428 if (dev->of_node) { in snd_dma_iram_alloc()
429 pool = of_gen_pool_get(dev->of_node, "iram", 0); in snd_dma_iram_alloc()
431 dmab->private_data = pool; in snd_dma_iram_alloc()
433 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); in snd_dma_iram_alloc()
441 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_iram_alloc()
447 struct gen_pool *pool = dmab->private_data; in snd_dma_iram_free()
449 if (pool && dmab->area) in snd_dma_iram_free()
450 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); in snd_dma_iram_free()
456 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_iram_mmap()
457 return remap_pfn_range(area, area->vm_start, in snd_dma_iram_mmap()
458 dmab->addr >> PAGE_SHIFT, in snd_dma_iram_mmap()
459 area->vm_end - area->vm_start, in snd_dma_iram_mmap()
460 area->vm_page_prot); in snd_dma_iram_mmap()
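
The IRAM allocator above takes memory from the device's "iram" gen_pool found via its OF node and, per line 441, quietly falls back to the ordinary coherent DEV type when no pool is available. A hedged sketch; the 32 KiB size and helper name are illustrative.

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>

/* hypothetical helper: prefer on-chip SRAM, accept normal DMA memory */
static int example_iram_buffer(struct device *dev, struct snd_dma_buffer *dmab)
{
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev,
				      DMA_BIDIRECTIONAL, 32 * 1024, dmab);
	if (err < 0)
		return err;

	/* dmab->dev.type may now read SNDRV_DMA_TYPE_DEV if the allocator
	 * fell back because no "iram" pool was found
	 */
	return 0;
}
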
475 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_dev_alloc()
480 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_dev_free()
486 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_dev_mmap()
487 dmab->area, dmab->addr, dmab->bytes); in snd_dma_dev_mmap()
497 * Write-combined pages
500 /* x86-specific allocations */
503 void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true); in snd_dma_wc_alloc()
507 dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL); in snd_dma_wc_alloc()
508 if (dma_mapping_error(dmab->dev.dev, dmab->addr)) { in snd_dma_wc_alloc()
509 do_free_pages(dmab->area, size, true); in snd_dma_wc_alloc()
517 dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes, in snd_dma_wc_free()
519 do_free_pages(dmab->area, dmab->bytes, true); in snd_dma_wc_free()
525 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_wc_mmap()
526 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_wc_mmap()
527 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
532 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_wc_alloc()
537 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_wc_free()
543 return dma_mmap_wc(dmab->dev.dev, area, in snd_dma_wc_mmap()
544 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
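
The write-combined type uses the same calling convention; the visible differences are the buffer type passed at allocation time and the pgprot_writecombine() applied by the mmap op above when the buffer is mapped to user space. A sketch for a playback-style buffer the CPU writes and the device reads; the helper is hypothetical.

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>

/* hypothetical helper */
static int example_wc_buffer(struct device *dev, struct snd_dma_buffer *dmab,
			     size_t size)
{
	return snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_WC, dev,
				       DMA_TO_DEVICE, size, dmab);
}
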
555 * Non-contiguous pages allocator
562 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, in snd_dma_noncontig_alloc()
567 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, in snd_dma_noncontig_alloc()
568 sg_dma_address(sgt->sgl)); in snd_dma_noncontig_alloc()
569 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); in snd_dma_noncontig_alloc()
571 dmab->private_data = sgt; in snd_dma_noncontig_alloc()
573 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_noncontig_alloc()
575 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); in snd_dma_noncontig_alloc()
582 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); in snd_dma_noncontig_free()
583 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, in snd_dma_noncontig_free()
584 dmab->dev.dir); in snd_dma_noncontig_free()
590 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_noncontig_mmap()
591 dmab->bytes, dmab->private_data); in snd_dma_noncontig_mmap()
598 if (dmab->dev.dir == DMA_TO_DEVICE) in snd_dma_noncontig_sync()
600 invalidate_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
601 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
602 dmab->dev.dir); in snd_dma_noncontig_sync()
604 if (dmab->dev.dir == DMA_FROM_DEVICE) in snd_dma_noncontig_sync()
606 flush_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
607 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
608 dmab->dev.dir); in snd_dma_noncontig_sync()
616 struct sg_table *sgt = dmab->private_data; in snd_dma_noncontig_iter_set()
618 __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, in snd_dma_noncontig_iter_set()
651 end = ofs + size - 1; /* the last byte address */ in snd_dma_noncontig_get_chunk_size()
664 return start - ofs; in snd_dma_noncontig_get_chunk_size()
666 /* ok, all on continuous pages */ in snd_dma_noncontig_get_chunk_size()
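
The non-contiguous type returns a kernel vmap of a dma_alloc_noncontiguous() allocation and may set dmab->dev.need_sync, so CPU access should go through snd_dma_buffer_sync() as sketched earlier. A hedged example for a capture-direction buffer:

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>

/* hypothetical helper */
static int example_noncontig_capture(struct device *dev,
				     struct snd_dma_buffer *dmab, size_t size)
{
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, dev,
				      DMA_FROM_DEVICE, size, dmab);
	if (err < 0)
		return err;

	/* no-op unless dmab->dev.need_sync was set by the allocator */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
	return 0;
}
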
681 /* Fallback SG-buffer allocations for x86 */
683 struct sg_table sgt; /* used by get_addr - must be the first item */
692 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; in __snd_dma_sg_fallback_free()
695 if (sgbuf->pages && sgbuf->npages) { in __snd_dma_sg_fallback_free()
697 while (i < sgbuf->count) { in __snd_dma_sg_fallback_free()
698 size = sgbuf->npages[i]; in __snd_dma_sg_fallback_free()
701 do_free_pages(page_address(sgbuf->pages[i]), in __snd_dma_sg_fallback_free()
706 kvfree(sgbuf->pages); in __snd_dma_sg_fallback_free()
707 kvfree(sgbuf->npages); in __snd_dma_sg_fallback_free()
714 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; in snd_dma_sg_fallback_alloc()
726 sgbuf->count = size >> PAGE_SHIFT; in snd_dma_sg_fallback_alloc()
727 sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
728 sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
729 if (!sgbuf->pages || !sgbuf->npages) in snd_dma_sg_fallback_alloc()
732 pagep = sgbuf->pages; in snd_dma_sg_fallback_alloc()
737 p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc); in snd_dma_sg_fallback_alloc()
746 size -= chunk; in snd_dma_sg_fallback_alloc()
749 sgbuf->npages[idx] = npages; in snd_dma_sg_fallback_alloc()
752 while (npages--) in snd_dma_sg_fallback_alloc()
756 if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count, in snd_dma_sg_fallback_alloc()
757 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL)) in snd_dma_sg_fallback_alloc()
760 if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0)) in snd_dma_sg_fallback_alloc()
763 p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL); in snd_dma_sg_fallback_alloc()
767 dmab->private_data = sgbuf; in snd_dma_sg_fallback_alloc()
769 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_sg_fallback_alloc()
773 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); in snd_dma_sg_fallback_alloc()
775 sg_free_table(&sgbuf->sgt); in snd_dma_sg_fallback_alloc()
783 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_free()
785 vunmap(dmab->area); in snd_dma_sg_fallback_free()
786 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); in snd_dma_sg_fallback_free()
787 sg_free_table(&sgbuf->sgt); in snd_dma_sg_fallback_free()
788 __snd_dma_sg_fallback_free(dmab, dmab->private_data); in snd_dma_sg_fallback_free()
794 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_mmap()
796 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_sg_fallback_mmap()
797 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_fallback_mmap()
798 return vm_map_pages(area, sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_mmap()
803 int type = dmab->dev.type; in snd_dma_sg_alloc()
808 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC; in snd_dma_sg_alloc()
810 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_sg_alloc()
815 dmab->dev.type = type; /* restore the type */ in snd_dma_sg_alloc()
832 * Non-coherent pages allocator
838 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, in snd_dma_noncoherent_alloc()
839 dmab->dev.dir, DEFAULT_GFP); in snd_dma_noncoherent_alloc()
841 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); in snd_dma_noncoherent_alloc()
847 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, in snd_dma_noncoherent_free()
848 dmab->addr, dmab->dev.dir); in snd_dma_noncoherent_free()
854 area->vm_page_prot = vm_get_page_prot(area->vm_flags); in snd_dma_noncoherent_mmap()
855 return dma_mmap_pages(dmab->dev.dev, area, in snd_dma_noncoherent_mmap()
856 area->vm_end - area->vm_start, in snd_dma_noncoherent_mmap()
857 virt_to_page(dmab->area)); in snd_dma_noncoherent_mmap()
864 if (dmab->dev.dir != DMA_TO_DEVICE) in snd_dma_noncoherent_sync()
865 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
866 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
868 if (dmab->dev.dir != DMA_FROM_DEVICE) in snd_dma_noncoherent_sync()
869 dma_sync_single_for_device(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
870 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
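
The non-coherent allocator records the stream direction and, as the sync routine above shows, skips the half of the cache maintenance that the direction makes unnecessary (a DMA_TO_DEVICE buffer is never synced for CPU reads). A sketch for a playback-direction buffer; the sample-copy context is assumed.

#include <sound/memalloc.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* hypothetical helper: fill a playback buffer and push it to the device */
static int example_noncoherent_playback(struct device *dev,
					struct snd_dma_buffer *dmab,
					const void *samples, size_t size)
{
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCOHERENT, dev,
				      DMA_TO_DEVICE, size, dmab);
	if (err < 0)
		return err;

	memcpy(dmab->area, samples, size);
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);	/* flush CPU writes */
	return 0;
}
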
908 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || in snd_dma_get_ops()
909 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops))) in snd_dma_get_ops()
911 return snd_dma_ops[dmab->dev.type]; in snd_dma_get_ops()