
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 */
#include <linux/dma-map-ops.h>
#include <linux/iommu-dma.h>
#include "direct.h"
/* in dmam_release(): */
	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
		       this->attrs);

/* in dmam_match(): */
	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
/* dmam_free_coherent - Managed dma_free_coherent() */
/* dmam_alloc_attrs - Managed dma_alloc_attrs() */

/* in dmam_alloc_attrs(): */
	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;
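/*
 * Example (not from mapping.c): a minimal sketch of how a driver might use
 * the managed allocator so the buffer is released automatically on unbind.
 * The "example_setup_ring" name, the device pointer and the size are
 * hypothetical and assumed to come from the caller's probe path.
 */
#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev, size_t ring_size,
			      void **ring, dma_addr_t *ring_dma)
{
	/* devres-managed: no explicit dmam_free_coherent() needed on unbind */
	*ring = dmam_alloc_attrs(dev, ring_size, ring_dma, GFP_KERNEL, 0);
	if (!*ring)
		return -ENOMEM;
	return 0;
}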
/* in dma_go_direct(): */
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
				dma_direct_get_required_mask(dev);

/* Check if the device uses a direct mapping for streaming DMA operations. */

/* in dma_alloc_direct(): */
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);

/* in dma_map_direct(): */
	return dma_go_direct(dev, *dev->dma_mask, ops);
/* in dma_map_page_attrs(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	addr = ops->map_page(dev, page, offset, size, dir, attrs);

/* in dma_unmap_page_attrs(): */
	ops->unmap_page(dev, addr, size, dir, attrs);
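/*
 * Example (not from mapping.c): a hedged sketch of the streaming API that
 * dma_map_page_attrs()/dma_unmap_page_attrs() back.  "dev", "page" and
 * "len" are assumed to be provided by the (hypothetical) caller.
 */
#include <linux/dma-mapping.h>

static int example_map_one_page(struct device *dev, struct page *page,
				size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed, nothing to unmap */
	*out = addr;
	return 0;
}

static void example_unmap_one_page(struct device *dev, dma_addr_t addr,
				   size_t len)
{
	dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
}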
/* in __dma_map_sg_attrs(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	ents = ops->map_sg(dev, sg, nents, dir, attrs);

	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
/* dma_map_sg_attrs - Map the given buffer for DMA */

/*
 * dma_map_sgtable - Map the given buffer for DMA
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage.  Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping.  Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *		in the sg_table.
 */

/* in dma_map_sgtable(): */
	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);

	sgt->nents = nents;
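/*
 * Example (not from mapping.c): a hedged sketch of mapping an sg_table and
 * walking the DMA-mapped entries.  The sg_table "sgt" is assumed to have
 * been filled by the (hypothetical) caller, e.g. via sg_alloc_table().
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_table(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret, i;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* -EINVAL/-ENOMEM/-EIO/-EREMOTEIO as documented */

	/* program the mapped segments into the (hypothetical) hardware */
	for_each_sgtable_dma_sg(sgt, sg, i)
		pr_debug("seg %d: %pad + %u\n", i, &sg_dma_address(sg),
			 sg_dma_len(sg));

	return 0;
}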
/* in dma_unmap_sg_attrs(): */
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
/* in dma_map_resource(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

/* in dma_unmap_resource(): */
		; /* nothing to do: uncached and no swiotlb */
	else if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
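/*
 * Example (not from mapping.c): a hedged sketch of mapping an MMIO region
 * (e.g. another device's BAR) for DMA with dma_map_resource().  "phys" and
 * "len" are assumed to describe a physical range the caller already owns.
 */
#include <linux/dma-mapping.h>

static dma_addr_t example_map_mmio(struct device *dev, phys_addr_t phys,
				   size_t len)
{
	dma_addr_t addr;

	addr = dma_map_resource(dev, phys, len, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;
	return addr;	/* later released with dma_unmap_resource() */
}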
/* in __dma_sync_single_for_cpu(): */
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);

/* in __dma_sync_single_for_device(): */
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);

/* in __dma_sync_sg_for_cpu(): */
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);

/* in __dma_sync_sg_for_device(): */
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
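/*
 * Example (not from mapping.c): a hedged sketch of the ownership hand-off
 * around a long-lived streaming mapping.  "buf_dma" and "buf" are assumed
 * to come from an earlier dma_map_single() of a driver-owned buffer.
 */
#include <linux/dma-mapping.h>

static void example_cpu_touches_buffer(struct device *dev, dma_addr_t buf_dma,
				       void *buf, size_t len)
{
	/* give the buffer back to the CPU before reading DMA'd data */
	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

	/* ... CPU inspects or updates *buf here (hypothetical) ... */

	/* hand ownership back to the device before the next transfer */
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}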
/* in dma_setup_need_sync(): */
		dev->dma_skip_sync = dev_is_dma_coherent(dev);
	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
		dev->dma_skip_sync = true;
	else
		dev->dma_skip_sync = false;
/* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems ... */

/* in dma_get_sgtable_attrs(): */
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
/* dma_can_mmap - check if a given device supports dma_mmap_* */

/* in dma_can_mmap(): */
	return ops->mmap != NULL;

/*
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 */

/* in dma_mmap_attrs(): */
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
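/*
 * Example (not from mapping.c): a hedged sketch of a driver ->mmap handler
 * that exposes a coherent buffer to user space.  The "example_priv" struct
 * and its fields are hypothetical and assumed to have been filled earlier
 * by dma_alloc_coherent().
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct example_priv {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t size;
};

static int example_mmap(struct example_priv *priv, struct vm_area_struct *vma)
{
	if (!dma_can_mmap(priv->dev))
		return -ENXIO;
	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_addr, priv->size);
}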
/* in dma_get_required_mask(): */
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs.
	 */
/* in dma_alloc_attrs(): */
	WARN_ON_ONCE(!dev->coherent_dma_mask);

	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);

/* in dma_free_attrs(): */
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context risks a BUG_ON() or sleeping, and indicates
	 * the driver is probably misusing the coherent API anyway.
	 */
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
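/*
 * Example (not from mapping.c): a hedged sketch of the coherent allocator
 * these paths implement.  The "descriptor" buffer and function names are
 * hypothetical.
 */
#include <linux/dma-mapping.h>

static void *example_alloc_descriptors(struct device *dev, size_t size,
				       dma_addr_t *dma)
{
	/* may sleep; do not call from interrupt context (see comment above) */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void example_free_descriptors(struct device *dev, size_t size,
				     void *desc, dma_addr_t dma)
{
	dma_free_coherent(dev, size, desc, dma);
}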
/* in __dma_alloc_pages(): */
	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;

	if (!ops->alloc_pages_op)
		return NULL;
	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);

/* in __dma_free_pages(): */
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
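/*
 * Example (not from mapping.c): a hedged sketch of the non-coherent page
 * allocator wrapped by these helpers.  The returned pages typically need
 * dma_sync_single_for_{cpu,device}() around each transfer; the names below
 * are hypothetical.
 */
#include <linux/dma-mapping.h>

static struct page *example_alloc_dma_pages(struct device *dev, size_t size,
					    dma_addr_t *dma)
{
	return dma_alloc_pages(dev, size, dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
}

static void example_free_dma_pages(struct device *dev, size_t size,
				   struct page *page, dma_addr_t dma)
{
	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);
}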
/* in dma_mmap_pages(): */
	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);

/* in alloc_single_sgt(): */
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
/* in dma_alloc_noncontiguous(): */
	sgt->nents = 1;

	debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);

/* in free_single_sgt(): */
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);

/* in dma_free_noncontiguous(): */
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

/* in dma_vmap_noncontiguous(): */
	return page_address(sg_page(sgt->sgl));

/* in dma_mmap_noncontiguous(): */
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
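/*
 * Example (not from mapping.c): a hedged sketch of the noncontiguous API,
 * which hands back an sg_table already mapped for "dev" plus an optional
 * kernel mapping.  Error handling is reduced to the minimum and the helper
 * name is hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void *example_alloc_noncontig(struct device *dev, size_t size,
				     struct sg_table **out_sgt)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}
	*out_sgt = sgt;
	/* later: dma_vunmap_noncontiguous() then dma_free_noncontiguous() */
	return vaddr;
}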
/* in dma_supported(): */
	/* ->dma_supported sets and clears the bypass flag, so ignore it here */
	if (!ops->dma_supported)
		return true;
	return ops->dma_supported(dev, mask);

/* in dma_pci_p2pdma_supported(): */
	/* if ops is not set, dma direct and default IOMMU support P2PDMA */

/* in dma_set_mask(): */
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

/* in dma_set_coherent_mask(): */
	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
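/*
 * Example (not from mapping.c): a hedged sketch of setting the DMA masks at
 * probe time, falling back to 32-bit addressing if the wider mask is
 * rejected.  The 48-bit width is a hypothetical device capability.
 */
#include <linux/dma-mapping.h>

static int example_set_masks(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}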
/* dma_addressing_limited - return if the device is addressing limited */

/* in dma_addressing_limited(): */
	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
	    dma_get_required_mask(dev))
		return true;

/* in dma_max_mapping_size(): */
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

/* in dma_opt_mapping_size(): */
	else if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

/* in dma_get_merge_boundary(): */
	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */
	return ops->get_merge_boundary(dev);
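/*
 * Example (not from mapping.c): a hedged sketch of using the query helpers
 * above to size a (hypothetical) per-segment buffer for a device.
 */
#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static size_t example_pick_segment_size(struct device *dev, size_t wanted)
{
	size_t seg = min(wanted, dma_opt_mapping_size(dev));

	/* never exceed what a single mapping can cover */
	seg = min(seg, dma_max_mapping_size(dev));

	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing is limited, expect bouncing\n");

	return seg;
}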