Lines Matching +full:iommu +full:-map +full:-mask in drivers/iommu/dma-iommu.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
19 #include <linux/iommu.h>
20 #include <linux/iommu-dma.h>
35 #include "dma-iommu.h"
36 #include "iommu-pages.h"
87 /* Options for dma-iommu use */
103 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
121 /* Per-CPU flush queue structure */
130 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
134 assert_spin_locked(&fq->lock); in fq_full()
135 return (((fq->tail + 1) & fq->mod_mask) == fq->head); in fq_full()
140 unsigned int idx = fq->tail; in fq_ring_add()
142 assert_spin_locked(&fq->lock); in fq_ring_add()
144 fq->tail = (idx + 1) & fq->mod_mask; in fq_ring_add()
151 u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt); in fq_ring_free_locked()
154 assert_spin_locked(&fq->lock); in fq_ring_free_locked()
158 if (fq->entries[idx].counter >= counter) in fq_ring_free_locked()
161 iommu_put_pages_list(&fq->entries[idx].freelist); in fq_ring_free_locked()
162 free_iova_fast(&cookie->iovad, in fq_ring_free_locked()
163 fq->entries[idx].iova_pfn, in fq_ring_free_locked()
164 fq->entries[idx].pages); in fq_ring_free_locked()
166 fq->head = (fq->head + 1) & fq->mod_mask; in fq_ring_free_locked()
174 spin_lock_irqsave(&fq->lock, flags); in fq_ring_free()
176 spin_unlock_irqrestore(&fq->lock, flags); in fq_ring_free()
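
The flush queue above is a power-of-two ring: indices wrap with "& mod_mask", the ring is full when advancing the tail would collide with the head, and entries are retired from the head. A standalone sketch of that indexing scheme (the struct and names below are illustrative, not the kernel's):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FQ_SIZE 8                      /* must be a power of two */

struct ring {
	unsigned int head, tail;       /* head = oldest entry, tail = next free slot */
	unsigned int mod_mask;         /* FQ_SIZE - 1, so "& mod_mask" wraps an index */
	unsigned long entries[FQ_SIZE];
};

static bool ring_full(struct ring *r)
{
	/* one slot is sacrificed so that "full" is distinguishable from "empty" */
	return ((r->tail + 1) & r->mod_mask) == r->head;
}

static bool ring_add(struct ring *r, unsigned long val)
{
	if (ring_full(r))
		return false;
	r->entries[r->tail] = val;
	r->tail = (r->tail + 1) & r->mod_mask;
	return true;
}

static bool ring_pop(struct ring *r, unsigned long *val)
{
	if (r->head == r->tail)        /* empty */
		return false;
	*val = r->entries[r->head];
	r->head = (r->head + 1) & r->mod_mask;
	return true;
}

int main(void)
{
	struct ring r = { .mod_mask = FQ_SIZE - 1 };
	unsigned long v;

	for (unsigned long i = 0; ring_add(&r, i); i++)
		;                      /* fills FQ_SIZE - 1 entries, then stops */
	assert(ring_full(&r));
	while (ring_pop(&r, &v))
		printf("retired %lu\n", v);
	return 0;
}
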
181 atomic64_inc(&cookie->fq_flush_start_cnt); in fq_flush_iotlb()
182 cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain); in fq_flush_iotlb()
183 atomic64_inc(&cookie->fq_flush_finish_cnt); in fq_flush_iotlb()
191 atomic_set(&cookie->fq_timer_on, 0); in fq_flush_timeout()
194 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) { in fq_flush_timeout()
195 fq_ring_free(cookie, cookie->single_fq); in fq_flush_timeout()
198 fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu)); in fq_flush_timeout()
211 * Order against the IOMMU driver's pagetable update from unmapping in queue_iova()
219 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in queue_iova()
220 fq = cookie->single_fq; in queue_iova()
222 fq = raw_cpu_ptr(cookie->percpu_fq); in queue_iova()
224 spin_lock_irqsave(&fq->lock, flags); in queue_iova()
240 fq->entries[idx].iova_pfn = pfn; in queue_iova()
241 fq->entries[idx].pages = pages; in queue_iova()
242 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); in queue_iova()
243 list_splice(freelist, &fq->entries[idx].freelist); in queue_iova()
245 spin_unlock_irqrestore(&fq->lock, flags); in queue_iova()
248 if (!atomic_read(&cookie->fq_timer_on) && in queue_iova()
249 !atomic_xchg(&cookie->fq_timer_on, 1)) in queue_iova()
250 mod_timer(&cookie->fq_timer, in queue_iova()
251 jiffies + msecs_to_jiffies(cookie->options.fq_timeout)); in queue_iova()
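
queue_iova() stamps each deferred entry with the current fq_flush_start_cnt; fq_flush_iotlb() increments the start counter, invalidates the IOTLB, then increments the finish counter; and fq_ring_free_locked() only releases entries whose stamp is below the finish count, i.e. entries queued before a completed flush. A toy user-space model of that handshake (names are made up; the real code uses atomic64_t and runs under fq->lock):

#include <stdint.h>
#include <stdio.h>

/* Toy model: one global flush "epoch" pair, entries stamped at queue time. */
static uint64_t flush_start_cnt;
static uint64_t flush_finish_cnt;

struct entry {
	unsigned long iova_pfn;
	uint64_t counter;     /* flush_start_cnt observed when the entry was queued */
};

static void queue_entry(struct entry *e, unsigned long pfn)
{
	e->iova_pfn = pfn;
	e->counter = flush_start_cnt;
}

static void flush_iotlb(void)
{
	flush_start_cnt++;
	/* ...hardware IOTLB invalidation would happen here... */
	flush_finish_cnt++;
}

/* An entry may be freed only once a full flush completed after it was queued. */
static int entry_can_be_freed(const struct entry *e)
{
	return e->counter < flush_finish_cnt;
}

int main(void)
{
	struct entry a, b;

	queue_entry(&a, 0x1000);
	flush_iotlb();
	queue_entry(&b, 0x2000);

	printf("a freeable: %d, b freeable: %d\n",
	       entry_can_be_freed(&a), entry_can_be_freed(&b)); /* prints 1, 0 */
	return 0;
}
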
259 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_single()
272 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_percpu()
280 if (!cookie->fq_domain) in iommu_dma_free_fq()
283 del_timer_sync(&cookie->fq_timer); in iommu_dma_free_fq()
284 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_free_fq()
285 iommu_dma_free_fq_single(cookie->single_fq); in iommu_dma_free_fq()
287 iommu_dma_free_fq_percpu(cookie->percpu_fq); in iommu_dma_free_fq()
294 fq->head = 0; in iommu_dma_init_one_fq()
295 fq->tail = 0; in iommu_dma_init_one_fq()
296 fq->mod_mask = fq_size - 1; in iommu_dma_init_one_fq()
298 spin_lock_init(&fq->lock); in iommu_dma_init_one_fq()
301 INIT_LIST_HEAD(&fq->entries[i].freelist); in iommu_dma_init_one_fq()
306 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_single()
311 return -ENOMEM; in iommu_dma_init_fq_single()
313 cookie->single_fq = queue; in iommu_dma_init_fq_single()
320 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_percpu()
327 return -ENOMEM; in iommu_dma_init_fq_percpu()
331 cookie->percpu_fq = queue; in iommu_dma_init_fq_percpu()
338 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_fq()
341 if (cookie->fq_domain) in iommu_dma_init_fq()
344 atomic64_set(&cookie->fq_flush_start_cnt, 0); in iommu_dma_init_fq()
345 atomic64_set(&cookie->fq_flush_finish_cnt, 0); in iommu_dma_init_fq()
347 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_init_fq()
354 return -ENOMEM; in iommu_dma_init_fq()
357 timer_setup(&cookie->fq_timer, fq_flush_timeout, 0); in iommu_dma_init_fq()
358 atomic_set(&cookie->fq_timer_on, 0); in iommu_dma_init_fq()
364 WRITE_ONCE(cookie->fq_domain, domain); in iommu_dma_init_fq()
370 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) in cookie_msi_granule()
371 return cookie->iovad.granule; in cookie_msi_granule()
381 INIT_LIST_HEAD(&cookie->msi_page_list); in cookie_alloc()
382 cookie->type = type; in cookie_alloc()
388 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
389 * @domain: IOMMU domain to prepare for DMA-API usage
393 if (domain->iova_cookie) in iommu_get_dma_cookie()
394 return -EEXIST; in iommu_get_dma_cookie()
396 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); in iommu_get_dma_cookie()
397 if (!domain->iova_cookie) in iommu_get_dma_cookie()
398 return -ENOMEM; in iommu_get_dma_cookie()
400 mutex_init(&domain->iova_cookie->mutex); in iommu_get_dma_cookie()
405 * iommu_get_msi_cookie - Acquire just MSI remapping resources
406 * @domain: IOMMU domain to prepare
420 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
421 return -EINVAL; in iommu_get_msi_cookie()
423 if (domain->iova_cookie) in iommu_get_msi_cookie()
424 return -EEXIST; in iommu_get_msi_cookie()
428 return -ENOMEM; in iommu_get_msi_cookie()
430 cookie->msi_iova = base; in iommu_get_msi_cookie()
431 domain->iova_cookie = cookie; in iommu_get_msi_cookie()
437 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
438 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
443 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
449 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) { in iommu_put_dma_cookie()
451 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
454 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { in iommu_put_dma_cookie()
455 list_del(&msi->list); in iommu_put_dma_cookie()
459 domain->iova_cookie = NULL; in iommu_put_dma_cookie()
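
The kerneldoc above describes the acquire/release pair that brackets a domain's DMA-API state. A minimal sketch of that pairing, assuming a hypothetical wrapper type and callbacks; note that in recent kernels these helpers are internal to drivers/iommu, so the include mirrors this file rather than a public header:

#include <linux/iommu.h>
#include <linux/slab.h>
#include "dma-iommu.h"	/* assumed location of the cookie helpers' declarations */

/* Hypothetical wrapper around the core domain, as an IOMMU driver might keep. */
struct my_domain {
	struct iommu_domain domain;
	/* ...driver page-table state... */
};

static struct iommu_domain *my_domain_alloc(void)
{
	struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return NULL;
	/* Attach DMA-API state: -EEXIST if already present, -ENOMEM on allocation failure. */
	if (iommu_get_dma_cookie(&md->domain)) {
		kfree(md);
		return NULL;
	}
	return &md->domain;
}

static void my_domain_free(struct iommu_domain *domain)
{
	struct my_domain *md = container_of(domain, struct my_domain, domain);

	iommu_put_dma_cookie(domain);	/* tears down the IOVA domain and MSI pages */
	kfree(md);
}
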
463 * iommu_dma_get_resv_regions - Reserved region driver helper
467 * IOMMU drivers can use this to implement their .get_resv_regions callback
468 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
475 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
478 if (dev->of_node) in iommu_dma_get_resv_regions()
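
As the comment above notes, iommu_dma_get_resv_regions() is intended to be called from an IOMMU driver's .get_resv_regions callback so that generic firmware reservations (GICv3 ITS doorbells and the like) are picked up alongside the driver's own. A sketch of that pattern; the MY_HW_* region is hypothetical, and iommu_alloc_resv_region() is shown with the GFP argument recent kernels take:

#include <linux/iommu.h>

/* Hypothetical hardware-specific reservation owned by this driver. */
#define MY_HW_RESV_BASE	0xfee00000UL
#define MY_HW_RESV_SIZE	0x00100000UL

static void my_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Driver-specific region first... */
	region = iommu_alloc_resv_region(MY_HW_RESV_BASE, MY_HW_RESV_SIZE,
					 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
	if (region)
		list_add_tail(&region->list, head);

	/* ...then the generic firmware-described reservations. */
	iommu_dma_get_resv_regions(dev, head);
}
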
486 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
490 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
491 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
496 return -ENOMEM; in cookie_init_hw_msi_region()
498 msi_page->phys = start; in cookie_init_hw_msi_region()
499 msi_page->iova = start; in cookie_init_hw_msi_region()
500 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
501 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
502 start += iovad->granule; in cookie_init_hw_msi_region()
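
cookie_init_hw_msi_region() rounds the region to whole IOVA granules: the start is pushed down by its offset within a granule, the length is rounded up, and the result is converted to a granule count via the granule shift. The same arithmetic standalone, with simple helpers standing in for iova_offset()/iova_align()/iova_shift() and an assumed 4 KiB granule:

#include <stdio.h>

static unsigned long granule = 0x1000;	/* assumed 4 KiB IOVA granule */

static unsigned long my_iova_offset(unsigned long addr) { return addr & (granule - 1); }
static unsigned long my_iova_align(unsigned long size)  { return (size + granule - 1) & ~(granule - 1); }
static unsigned int  my_iova_shift(void)                { return __builtin_ctzl(granule); }

int main(void)
{
	unsigned long start = 0x80001234, end = 0x80004000;
	unsigned long num_pages;

	start -= my_iova_offset(start);                            /* 0x80001000 */
	num_pages = my_iova_align(end - start) >> my_iova_shift(); /* 3 granules */

	printf("start=%#lx pages=%lu\n", start, num_pages);
	return 0;
}
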
514 return res_a->res->start > res_b->res->start; in iommu_dma_ranges_sort()
520 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
525 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
526 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
529 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
530 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
535 list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort); in iova_reserve_pci_windows()
536 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
537 end = window->res->start - window->offset; in iova_reserve_pci_windows()
544 /* DMA ranges should be non-overlapping */ in iova_reserve_pci_windows()
545 dev_err(&dev->dev, in iova_reserve_pci_windows()
546 "Failed to reserve IOVA [%pa-%pa]\n", in iova_reserve_pci_windows()
548 return -EINVAL; in iova_reserve_pci_windows()
551 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
553 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
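
iova_reserve_pci_windows() walks the bridge's dma_ranges in sorted order and reserves every hole between them, plus the tail beyond the last range, so that only addresses the bridge can actually forward remain allocatable. The gap computation modelled standalone (the ranges and the 32-bit limit are made up):

#include <stdio.h>

struct range { unsigned long start, end; };	/* inclusive, sorted, non-overlapping */

int main(void)
{
	/* Hypothetical bridge dma-ranges. */
	struct range dma_ranges[] = {
		{ 0x00000000, 0x3fffffff },
		{ 0x80000000, 0xbfffffff },
	};
	unsigned long start = 0, limit = 0xffffffff;

	for (unsigned int i = 0; i < sizeof(dma_ranges) / sizeof(dma_ranges[0]); i++) {
		if (start < dma_ranges[i].start)	/* hole below this range */
			printf("reserve [%#lx-%#lx]\n", start, dma_ranges[i].start - 1);
		start = dma_ranges[i].end + 1;
	}
	if (start <= limit)				/* tail above the last range */
		printf("reserve [%#lx-%#lx]\n", start, limit);
	return 0;
}
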
566 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
567 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
583 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
586 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
587 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
590 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
591 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
592 region->start + region->length); in iova_reserve_iommu_regions()
603 return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; in dev_is_untrusted()
627 * If kmalloc() buffers are not DMA-safe for this device and in dev_use_sg_swiotlb()
633 if (!dma_kmalloc_size_aligned(s->length)) in dev_use_sg_swiotlb()
641 * iommu_dma_init_options - Initialize dma-iommu options
645 * This allows tuning dma-iommu options based on device-specific properties
651 if (dev->iommu->shadow_on_flush) { in iommu_dma_init_options()
652 options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE; in iommu_dma_init_options()
653 options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT; in iommu_dma_init_options()
654 options->fq_size = IOVA_SINGLE_FQ_SIZE; in iommu_dma_init_options()
656 options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE; in iommu_dma_init_options()
657 options->fq_size = IOVA_DEFAULT_FQ_SIZE; in iommu_dma_init_options()
658 options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT; in iommu_dma_init_options()
663 * iommu_dma_init_domain - Initialise a DMA mapping domain
664 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
673 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
674 const struct bus_dma_region *map = dev->dma_range_map; in iommu_dma_init_domain() local
679 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) in iommu_dma_init_domain()
680 return -EINVAL; in iommu_dma_init_domain()
682 iovad = &cookie->iovad; in iommu_dma_init_domain()
685 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
689 if (map) { in iommu_dma_init_domain()
690 if (dma_range_map_min(map) > domain->geometry.aperture_end || in iommu_dma_init_domain()
691 dma_range_map_max(map) < domain->geometry.aperture_start) { in iommu_dma_init_domain()
692 pr_warn("specified DMA range outside IOMMU capability\n"); in iommu_dma_init_domain()
693 return -EFAULT; in iommu_dma_init_domain()
698 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
700 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
701 mutex_lock(&cookie->mutex); in iommu_dma_init_domain()
702 if (iovad->start_pfn) { in iommu_dma_init_domain()
703 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
704 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
706 ret = -EFAULT; in iommu_dma_init_domain()
719 iommu_dma_init_options(&cookie->options, dev); in iommu_dma_init_domain()
722 if (domain->type == IOMMU_DOMAIN_DMA_FQ && in iommu_dma_init_domain()
724 domain->type = IOMMU_DOMAIN_DMA; in iommu_dma_init_domain()
729 mutex_unlock(&cookie->mutex); in iommu_dma_init_domain()
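
iommu_dma_init_domain() derives the IOVA granule from the lowest set bit of the domain's pgsize_bitmap and clamps the first allocatable PFN to the aperture start, never below PFN 1. A standalone version of that bit arithmetic, with __builtin_ctzl() standing in for __ffs() and made-up hardware values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical hardware: supports 4K, 2M and 1G mappings. */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
	unsigned long aperture_start = 0x40000000;

	unsigned int order = __builtin_ctzl(pgsize_bitmap);	/* lowest order: 12 -> 4K granule */
	unsigned long base_pfn = 1;				/* never hand out IOVA 0 */

	if (aperture_start >> order > base_pfn)
		base_pfn = aperture_start >> order;		/* clamp to the aperture start */

	printf("granule=%lu base_pfn=%#lx\n", 1UL << order, base_pfn);
	return 0;
}
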
734 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
737 * @coherent: Is the DMA master cache-coherent?
740 * Return: corresponding IOMMU API page protection flags
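
The body of dma_info_to_prot() is not among the matched lines; the sketch below is a reconstruction of the translation its kerneldoc describes, using the real IOMMU/DMA API flag names, and should not be taken as the authoritative in-tree implementation:

#include <linux/dma-mapping.h>
#include <linux/iommu.h>

/* Illustrative reconstruction only. */
static int example_dma_info_to_prot(enum dma_data_direction dir, bool coherent,
				    unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;	/* coherent masters get cacheable mappings */

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;	/* device only reads the buffer */
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;	/* device only writes the buffer */
	default:
		return 0;
	}
}
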
765 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
766 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
769 if (cookie->type == IOMMU_DMA_MSI_COOKIE) { in iommu_dma_alloc_iova()
770 cookie->msi_iova += size; in iommu_dma_alloc_iova()
771 return cookie->msi_iova - size; in iommu_dma_alloc_iova()
777 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
779 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
780 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
783 * Try to use all the 32-bit PCI addresses first. The original SAC vs. in iommu_dma_alloc_iova()
786 * venture into the 64-bit space until necessary. in iommu_dma_alloc_iova()
790 * some inherent bug in handling >32-bit addresses, or not all the in iommu_dma_alloc_iova()
791 * expected address bits are wired up between the device and the IOMMU. in iommu_dma_alloc_iova()
793 if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { in iommu_dma_alloc_iova()
799 dev->iommu->pci_32bit_workaround = false; in iommu_dma_alloc_iova()
800 dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); in iommu_dma_alloc_iova()
811 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_free_iova()
814 if (cookie->type == IOMMU_DMA_MSI_COOKIE) in iommu_dma_free_iova()
815 cookie->msi_iova -= size; in iommu_dma_free_iova()
816 else if (gather && gather->queued) in iommu_dma_free_iova()
819 &gather->freelist); in iommu_dma_free_iova()
829 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
830 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
835 dma_addr -= iova_off; in __iommu_dma_unmap()
838 iotlb_gather.queued = READ_ONCE(cookie->fq_domain); in __iommu_dma_unmap()
852 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
853 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
872 if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) { in __iommu_dma_map()
881 while (count--) in __iommu_dma_free_pages()
900 /* IOMMU can map any pages, so himem can also be used here */ in __iommu_dma_alloc_pages()
908 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
910 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
931 count -= order_size; in __iommu_dma_alloc_pages()
932 while (order_size--) in __iommu_dma_alloc_pages()
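
As the comments above say, higher-order pages are treated as an optimisation: the allocator repeatedly carves off the largest usable power-of-two chunk that still fits the remaining count, falling back towards order 0 as needed. The size-selection arithmetic modelled standalone (order_mask and count are made up; the allocation-failure fallback is omitted):

#include <stdio.h>

/* Index of the highest set bit, a stand-in for the kernel's __fls(); x must be non-zero. */
static unsigned int fls_idx(unsigned int x)
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	unsigned int count = 13;	/* pages still needed */
	unsigned int order_mask = (1u << 0) | (1u << 2) | (1u << 4);	/* hypothetical usable orders */

	while (count) {
		/* Keep only orders that fit in the remaining count, then take the largest. */
		unsigned int usable = order_mask & ((2u << fls_idx(count)) - 1);
		unsigned int order = fls_idx(usable);
		unsigned int order_size = 1u << order;

		printf("allocate a 2^%u-page chunk (%u pages)\n", order, order_size);
		count -= order_size;
	}
	return 0;
}
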
940 * but an IOMMU which supports smaller pages might not map the whole thing.
946 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_alloc_noncontiguous()
947 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_alloc_noncontiguous()
950 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in __iommu_dma_alloc_noncontiguous()
959 min_size = alloc_sizes & -alloc_sizes; in __iommu_dma_alloc_noncontiguous()
976 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in __iommu_dma_alloc_noncontiguous()
981 * Remove the zone/policy flags from the GFP - these are applied to the in __iommu_dma_alloc_noncontiguous()
994 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) in __iommu_dma_alloc_noncontiguous()
995 arch_dma_prep_coherent(sg_page(sg), sg->length); in __iommu_dma_alloc_noncontiguous()
998 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot, in __iommu_dma_alloc_noncontiguous()
1003 sgt->sgl->dma_address = iova; in __iommu_dma_alloc_noncontiguous()
1004 sgt->sgl->dma_length = size; in __iommu_dma_alloc_noncontiguous()
1027 *dma_handle = sgt.sgl->dma_address; in iommu_dma_alloc_remap()
1045 * the DMA-API internal vmapping and freeing easier we stash away the page
1047 * e.g. when a vmap-variant that takes a scatterlist comes along.
1065 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs); in iommu_dma_alloc_noncontiguous()
1066 if (!sh->pages) { in iommu_dma_alloc_noncontiguous()
1070 return &sh->sgt; in iommu_dma_alloc_noncontiguous()
1078 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); in iommu_dma_free_noncontiguous()
1079 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); in iommu_dma_free_noncontiguous()
1080 sg_free_table(&sh->sgt); in iommu_dma_free_noncontiguous()
1089 return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); in iommu_dma_vmap_noncontiguous()
1097 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) in iommu_dma_mmap_noncontiguous()
1098 return -ENXIO; in iommu_dma_mmap_noncontiguous()
1099 return vm_map_pages(vma, sgt_handle(sgt)->pages, count); in iommu_dma_mmap_noncontiguous()
1141 sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1144 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1157 sg->length, dir); in iommu_dma_sync_sg_for_device()
1160 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
1171 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_page()
1172 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_page()
1182 dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); in iommu_dma_map_page()
1196 * leftover kernel data, so zero the pre- and post-padding. in iommu_dma_map_page()
1203 /* Pre-padding */ in iommu_dma_map_page()
1205 memset((void *)start, 0, virt - start); in iommu_dma_map_page()
1207 /* Post-padding */ in iommu_dma_map_page()
1210 iova_align(iovad, start) - start); in iommu_dma_map_page()
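
For untrusted devices the bounce slot is granule-aligned, so the bytes before and after the real data would otherwise leak stale kernel memory; the code above zeroes that pre- and post-padding. The padding arithmetic standalone, assuming a hypothetical 2 KiB granule:

#include <stdio.h>

int main(void)
{
	unsigned long granule = 0x800;		/* assumed 2 KiB IOVA granule */
	unsigned long virt = 0x10000234;	/* start of the real data in the bounce slot */
	unsigned long size = 0x500;		/* bytes of real data */

	unsigned long pre_start = virt & ~(granule - 1);
	unsigned long pre_len = virt - pre_start;

	unsigned long post_start = virt + size;
	unsigned long post_len = ((post_start + granule - 1) & ~(granule - 1)) - post_start;

	/* Both paddings would be memset() to zero before the slot is exposed to the device. */
	printf("pre-padding: %#lx bytes, post-padding: %#lx bytes\n", pre_len, post_len);
	return 0;
}
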
1242 * Prepare a successfully-mapped scatterlist to give back to the caller.
1261 unsigned int s_iova_len = s->length; in __finalise_sg()
1279 s->offset += s_iova_off; in __finalise_sg()
1280 s->length = s_length; in __finalise_sg()
1284 * - there is a valid output segment to append to in __finalise_sg()
1285 * - and this segment starts on an IOVA page boundary in __finalise_sg()
1286 * - but doesn't fall at a segment boundary in __finalise_sg()
1287 * - and wouldn't make the resulting output segment too long in __finalise_sg()
1290 (max_len - cur_len >= s_length)) { in __finalise_sg()
1326 s->offset += sg_dma_address(s); in __invalidate_sg()
1328 s->length = sg_dma_len(s); in __invalidate_sg()
1356 s->offset, s->length, dir, attrs); in iommu_dma_map_sg_swiotlb()
1359 sg_dma_len(s) = s->length; in iommu_dma_map_sg_swiotlb()
1366 return -EIO; in iommu_dma_map_sg_swiotlb()
1371 * any old buffer layout, but the IOMMU API requires everything to be
1372 * aligned to IOMMU pages. Hence the need for this complicated bit of
1373 * impedance-matching, to be able to hand off a suitably-aligned list,
1380 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
1381 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
1385 enum pci_p2pdma_map_type map; in iommu_dma_map_sg() local
1388 unsigned long mask = dma_get_seg_boundary(dev); in iommu_dma_map_sg() local
1406 * IOVA granules for the IOMMU driver to handle. With some clever in iommu_dma_map_sg()
1407 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
1408 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
1411 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
1412 size_t s_length = s->length; in iommu_dma_map_sg()
1413 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
1416 map = pci_p2pdma_map_segment(&p2pdma_state, dev, s); in iommu_dma_map_sg()
1417 switch (map) { in iommu_dma_map_sg()
1434 ret = -EREMOTEIO; in iommu_dma_map_sg()
1441 s->offset -= s_iova_off; in iommu_dma_map_sg()
1443 s->length = s_length; in iommu_dma_map_sg()
1447 * depend on these assumptions about the segment boundary mask: in iommu_dma_map_sg()
1448 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
1450 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
1454 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
1458 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
1459 prev->length += pad_len; in iommu_dma_map_sg()
1472 ret = -ENOMEM; in iommu_dma_map_sg()
1477 * We'll leave any physical concatenation to the IOMMU driver's in iommu_dma_map_sg()
1478 * implementation - it knows better than we do. in iommu_dma_map_sg()
1491 if (ret != -ENOMEM && ret != -EREMOTEIO) in iommu_dma_map_sg()
1492 return -EINVAL; in iommu_dma_map_sg()
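
dma_get_seg_boundary() returns a power-of-two-minus-one mask, and "(mask - iova_len + 1) & mask" is the padding that rounds the running IOVA length up to the next boundary multiple, so the following segment starts right after a boundary and cannot straddle one. A standalone check of that expression with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffff;		/* 64 KiB segment boundary: (1 << 16) - 1 */
	unsigned long iova_len = 0x9000;	/* IOVA bytes accumulated for earlier segments */

	/* Bytes of padding needed so the next segment starts on a boundary multiple. */
	unsigned long pad_len = (mask - iova_len + 1) & mask;

	printf("pad_len = %#lx, next segment starts at %#lx\n",
	       pad_len, iova_len + pad_len);	/* 0x7000, 0x10000 */
	return 0;
}
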
1529 nents -= i; in iommu_dma_unmap_sg()
1543 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
1566 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
1573 * If the address is remapped, then it's either non-coherent in __iommu_dma_free()
1660 dev->coherent_dma_mask); in iommu_dma_alloc()
1674 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1677 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1682 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1683 return -ENXIO; in iommu_dma_mmap()
1695 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1696 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1697 vma->vm_page_prot); in iommu_dma_mmap()
1723 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1731 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
1752 dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; in iommu_setup_dma_ops()
1754 dev->dma_iommu = iommu_is_dma_domain(domain); in iommu_setup_dma_ops()
1755 if (dev->dma_iommu && iommu_dma_init_domain(domain, dev)) in iommu_setup_dma_ops()
1760 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in iommu_setup_dma_ops()
1762 dev->dma_iommu = false; in iommu_setup_dma_ops()
1768 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_get_msi_page()
1774 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
1775 list_for_each_entry(msi_page, &cookie->msi_page_list, list) in iommu_dma_get_msi_page()
1776 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
1790 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
1791 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
1792 msi_page->iova = iova; in iommu_dma_get_msi_page()
1793 list_add(&msi_page->list, &cookie->msi_page_list); in iommu_dma_get_msi_page()
1804 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1817 if (!domain || !domain->iova_cookie) { in iommu_dma_prepare_msi()
1818 desc->iommu_cookie = NULL; in iommu_dma_prepare_msi()
1834 return -ENOMEM; in iommu_dma_prepare_msi()
1839 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1851 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) in iommu_dma_compose_msi_msg()
1854 msg->address_hi = upper_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
1855 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; in iommu_dma_compose_msi_msg()
1856 msg->address_lo += lower_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
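
iommu_dma_compose_msi_msg() rewrites the address the device writes its MSI to: the offset within the MSI granule is kept from the physical doorbell, while the granule-aligned part is replaced by the IOVA that iommu_dma_prepare_msi() mapped the doorbell page at. The same rewrite standalone, with made-up doorbell and IOVA values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t doorbell_pa = 0x00000000fee00040ULL;	/* physical MSI doorbell address */
	uint64_t msi_iova    = 0x00000008fff00000ULL;	/* IOVA the doorbell page was mapped at */
	uint64_t granule     = 0x1000;			/* assumed MSI cookie granule */

	uint32_t address_hi = (uint32_t)(doorbell_pa >> 32);
	uint32_t address_lo = (uint32_t)doorbell_pa;

	/* Keep the offset within the granule, substitute the IOVA for the aligned part. */
	address_hi = (uint32_t)(msi_iova >> 32);
	address_lo &= (uint32_t)(granule - 1);
	address_lo += (uint32_t)msi_iova;

	printf("device now writes its MSI to %#x%08x\n", address_hi, address_lo);
	return 0;
}
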