Lines matching "dma" and "write" (xe_hmm.c)
// SPDX-License-Identifier: MIT
#include <linux/dma-mapping.h>

xe_npages_in_range():
        return (end - start) >> PAGE_SHIFT;
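For illustration only (the values are assumed, not from the source), the shift converts a byte length into a page count:

        /*
         * Illustration, assuming 4 KiB pages (PAGE_SHIFT == 12):
         *   end - start = 0x200000 (2 MiB)
         *   0x200000 >> 12 == 512 pages
         */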
xe_alloc_sg():
        if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
                return -EAGAIN;
        npages = xe_npages_in_range(range->start, range->end);
        hmm_pfn = range->hmm_pfns[i];
        /* If order > 0 the page may extend beyond range->start */
        len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
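A worked example of the clamp above, using illustrative values that are not from the source: the low bits of the pfn give the offset of this page inside its folio, and that offset is subtracted from the chunk length.

        /*
         * Illustrative values: suppose the pfn decodes to 0x1003 and the
         * map order is 9 (a 2 MiB folio on 4 KiB pages), so len = 512.
         *   (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1) == 0x1003 & 0x1ff == 3
         *   len -= 3  =>  len == 509
         * Only the 509 pages from range->start to the end of the folio are
         * counted toward this chunk; the 3 pages before range->start belong
         * to the same folio but fall outside the range.
         */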
 * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
 * in a hmm_range. dma-map pages if necessary. dma-addresses are saved in the sg table
 * @xe: the xe device that will access the dma-addresses in the sg table
 * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
 * @write: whether we write to this range. This decides the dma map direction
 * for system pages. If write, we map it bi-directional; otherwise
 * do a dma-mapping so it can be accessed by GPU/DMA.
 * address), and there is no need for dma-mapping. This is TBD.
 * FIXME: dma-mapping for peer gpu device to access a remote gpu's
 * Returns 0 if successful; -ENOMEM if it fails to allocate memory
xe_build_sg(..., bool write):
        unsigned long npages = xe_npages_in_range(range->start, range->end);
        struct device *dev = xe->drm.dev;
        for_each_sg(st->sgl, sgl, st->nents, j) {
                hmm_pfn = range->hmm_pfns[i];
                size -= page_to_pfn(page) & (size - 1);
                if (unlikely(j == st->nents - 1)) {
                        size -= (i - npages);
        return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
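Two details in the fragment above are easy to miss. First, size starts at the folio size and is clamped with the same trick as in xe_alloc_sg(), so each sg entry covers only the part of the folio inside the range. Second, on the last entry i may have overshot npages, and the overshoot is given back so the table covers exactly the range. The write flag only picks the DMA direction; below is a minimal stand-alone sketch of that rule, with the attribute argument assumed to be 0 because the real flags are cut off in the listing.

        /* Sketch only: dev/st/write as in the fragment, attrs assumed 0. */
        enum dma_data_direction dir = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
        return dma_map_sgtable(dev, st, dir, 0);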
xe_hmm_userptr_set_mapped():
        struct xe_userptr *userptr = &uvma->userptr;
        struct xe_vm *vm = xe_vma_vm(&uvma->vma);
        lockdep_assert_held_write(&vm->lock);
        lockdep_assert_held(&vm->userptr.notifier_lock);
        mutex_lock(&userptr->unmap_mutex);
        xe_assert(vm->xe, !userptr->mapped);
        userptr->mapped = true;
        mutex_unlock(&userptr->unmap_mutex);
xe_hmm_userptr_unmap():
        struct xe_userptr *userptr = &uvma->userptr;
        struct xe_vma *vma = &uvma->vma;
        bool write = !xe_vma_read_only(vma);
        struct xe_device *xe = vm->xe;
        if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
            !lockdep_is_held_type(&vm->lock, 0) &&
            !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
                lockdep_assert_held(&vm->lock);
        mutex_lock(&userptr->unmap_mutex);
        if (userptr->sg && userptr->mapped)
                dma_unmap_sgtable(xe->drm.dev, userptr->sg,
                                  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
        userptr->mapped = false;
        mutex_unlock(&userptr->unmap_mutex);
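The unmap above mirrors the map side: a table mapped DMA_BIDIRECTIONAL (or DMA_TO_DEVICE for a read-only VMA) must be unmapped with the same direction. A hypothetical helper capturing that rule; xe_userptr_dma_dir() is not a real function in the driver, just a sketch of how the repeated expression could be factored.

/* Hypothetical helper, not in xe_hmm.c: one place for the direction rule. */
static enum dma_data_direction xe_userptr_dma_dir(struct xe_vma *vma)
{
        return xe_vma_read_only(vma) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
}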
 * xe_hmm_userptr_free_sg() - Free the scatter gather table of a userptr
 * sg table, and dma-unmap the addresses in the table.
xe_hmm_userptr_free_sg():
        struct xe_userptr *userptr = &uvma->userptr;
        xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
        sg_free_table(userptr->sg);
        userptr->sg = NULL;
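The ordering matters here: the table is dma-unmapped before its backing storage is freed. A minimal sketch of the same teardown for a generic sg_table, assuming dev, sgt and dir mirror what was used at map time (names are placeholders, not the driver's variables):

        /* Sketch with assumed dev/sgt/dir; dir must match the map-time value. */
        dma_unmap_sgtable(dev, sgt, dir, 0);    /* undo dma_map_sgtable() */
        sg_free_table(sgt);                     /* then release the sgl storage */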
 * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
 * This must be called with mmap read or write lock held.
xe_hmm_userptr_populate_range():
        struct xe_vma *vma = &uvma->vma;
        struct hmm_range hmm_range = {
                .notifier = &uvma->userptr.notifier,
                .dev_private_owner = vm->xe,
        };
        bool write = !xe_vma_read_only(vma);
        userptr = &uvma->userptr;
        mmap_assert_locked(userptr->notifier.mm);
        if (vma->gpuva.flags & XE_VMA_DESTROYED)
        notifier_seq = mmu_interval_read_begin(&userptr->notifier);
        if (notifier_seq == userptr->notifier_seq)
        if (userptr->sg)
        return -ENOMEM;
        if (write)
        if (!mmget_not_zero(userptr->notifier.mm)) {
                ret = -EFAULT;
        hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
        mmap_read_lock(userptr->notifier.mm);
        mmap_read_unlock(userptr->notifier.mm);
        if (ret == -EBUSY) {
        mmput(userptr->notifier.mm);
        ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
        ret = down_read_interruptible(&vm->userptr.notifier_lock);
        ret = -EAGAIN;
        ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
                          &vm->userptr.notifier_lock, write);
        userptr->sg = &userptr->sgt;
        userptr->notifier_seq = hmm_range.notifier_seq;
        up_read(&vm->userptr.notifier_lock);
        up_read(&vm->userptr.notifier_lock);
        sg_free_table(&userptr->sgt);
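The sequence-number handling visible above (mmu_interval_read_begin() before faulting, the -EBUSY retry around hmm_range_fault(), and re-checking under the notifier lock before the result is committed) follows the standard mmu_interval_notifier pattern from the HMM documentation; the driver itself returns -EAGAIN to its caller instead of looping, as the -EAGAIN line shows. Below is a generic, driver-agnostic sketch of that loop. fault_and_collect() and its parameters are illustrative, not part of the driver; it would need <linux/hmm.h>, <linux/mmu_notifier.h>, <linux/mm.h> and <linux/rwsem.h>.

/* Generic mmu_interval_notifier retry loop (sketch, not the xe code). */
static int fault_and_collect(struct mmu_interval_notifier *notifier,
                             struct hmm_range *range, struct mm_struct *mm,
                             struct rw_semaphore *notifier_lock)
{
        int ret;

again:
        range->notifier_seq = mmu_interval_read_begin(notifier);

        mmap_read_lock(mm);
        ret = hmm_range_fault(range);
        mmap_read_unlock(mm);
        if (ret == -EBUSY)
                goto again;             /* the pages changed while faulting */
        if (ret)
                return ret;

        /* range->hmm_pfns[] is only stable while the notifier lock is held. */
        down_read(notifier_lock);
        if (mmu_interval_read_retry(notifier, range->notifier_seq)) {
                up_read(notifier_lock);
                goto again;             /* invalidated since read_begin() */
        }
        /* ... consume range->hmm_pfns[], e.g. build and dma-map an sg table ... */
        up_read(notifier_lock);
        return 0;
}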