Lines Matching +full:compound +full:- +full:device

78 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
79 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
97 list_for_each_entry(ivmm, &svm->inst, head) { in nouveau_ivmm_find()
98 if (ivmm->inst == inst) in nouveau_ivmm_find()
105 NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
107 NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
119 args->va_start &= PAGE_MASK; in nouveau_svmm_bind()
120 args->va_end = ALIGN(args->va_end, PAGE_SIZE); in nouveau_svmm_bind()
123 if (args->reserved0 || args->reserved1) in nouveau_svmm_bind()
124 return -EINVAL; in nouveau_svmm_bind()
125 if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK)) in nouveau_svmm_bind()
126 return -EINVAL; in nouveau_svmm_bind()
127 if (args->va_start >= args->va_end) in nouveau_svmm_bind()
128 return -EINVAL; in nouveau_svmm_bind()
130 cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT; in nouveau_svmm_bind()
136 return -EINVAL; in nouveau_svmm_bind()
140 target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT; in nouveau_svmm_bind()
146 return -EINVAL; in nouveau_svmm_bind()
152 * each device driver. in nouveau_svmm_bind()
154 if (args->stride) in nouveau_svmm_bind()
155 return -EINVAL; in nouveau_svmm_bind()
165 return -EINVAL; in nouveau_svmm_bind()
169 if (!cli->svm.svmm) { in nouveau_svmm_bind()
172 return -EINVAL; in nouveau_svmm_bind()
175 for (addr = args->va_start, end = args->va_end; addr < end;) { in nouveau_svmm_bind()
183 addr = max(addr, vma->vm_start); in nouveau_svmm_bind()
184 next = min(vma->vm_end, end); in nouveau_svmm_bind()
186 nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr, in nouveau_svmm_bind()
196 args->result = 0; in nouveau_svmm_bind()
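
The validation above is the whole userspace contract for the bind ioctl: the header packs a command and a target, the reserved words must be zero, a non-zero stride is rejected, the range is page-aligned, and the call only works once cli->svm.svmm exists, after which each VMA in the range is migrated with nouveau_dmem_migrate_vma(). A minimal userspace sketch, assuming the uapi definitions in include/uapi/drm/nouveau_drm.h (DRM_IOCTL_NOUVEAU_SVM_BIND, NOUVEAU_SVM_BIND_COMMAND__MIGRATE, NOUVEAU_SVM_BIND_TARGET__GPU_VRAM) and a client that already issued DRM_IOCTL_NOUVEAU_SVM_INIT:

/* Sketch: ask the kernel to migrate [va_start, va_end) to GPU VRAM.
 * Header path may be <libdrm/nouveau_drm.h> depending on the install. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static int svm_migrate_to_vram(int drm_fd, uint64_t va_start, uint64_t va_end)
{
	struct drm_nouveau_svm_bind bind;

	memset(&bind, 0, sizeof(bind));
	bind.header  = (uint64_t)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
			<< NOUVEAU_SVM_BIND_COMMAND_SHIFT;
	bind.header |= (uint64_t)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
			<< NOUVEAU_SVM_BIND_TARGET_SHIFT;
	bind.va_start = va_start;	/* kernel truncates to PAGE_MASK */
	bind.va_end   = va_end;		/* kernel rounds up to PAGE_SIZE */
	bind.stride   = 0;		/* non-zero stride returns -EINVAL */

	return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
}
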
210 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
211 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst); in nouveau_svmm_part()
213 list_del(&ivmm->head); in nouveau_svmm_part()
216 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
227 return -ENOMEM; in nouveau_svmm_join()
228 ivmm->svmm = svmm; in nouveau_svmm_join()
229 ivmm->inst = inst; in nouveau_svmm_join()
231 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
232 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst); in nouveau_svmm_join()
233 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
238 /* Invalidate SVMM address-range on GPU. */
243 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, in nouveau_svmm_invalidate()
246 .size = limit - start, in nouveau_svmm_invalidate()
257 unsigned long start = update->start; in nouveau_svmm_invalidate_range_start()
258 unsigned long limit = update->end; in nouveau_svmm_invalidate_range_start()
261 return -EAGAIN; in nouveau_svmm_invalidate_range_start()
263 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); in nouveau_svmm_invalidate_range_start()
265 mutex_lock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
266 if (unlikely(!svmm->vmm)) in nouveau_svmm_invalidate_range_start()
270 * Ignore invalidation callbacks for device private pages since in nouveau_svmm_invalidate_range_start()
273 if (update->event == MMU_NOTIFY_MIGRATE && in nouveau_svmm_invalidate_range_start()
274 update->owner == svmm->vmm->cli->drm->dev) in nouveau_svmm_invalidate_range_start()
277 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { in nouveau_svmm_invalidate_range_start()
278 if (start < svmm->unmanaged.start) { in nouveau_svmm_invalidate_range_start()
280 svmm->unmanaged.limit); in nouveau_svmm_invalidate_range_start()
282 start = svmm->unmanaged.limit; in nouveau_svmm_invalidate_range_start()
288 mutex_unlock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
307 mutex_lock(&svmm->mutex); in nouveau_svmm_fini()
308 svmm->vmm = NULL; in nouveau_svmm_fini()
309 mutex_unlock(&svmm->mutex); in nouveau_svmm_fini()
310 mmu_notifier_put(&svmm->notifier); in nouveau_svmm_fini()
325 if (!cli->drm->svm) in nouveau_svmm_init()
326 return -ENOSYS; in nouveau_svmm_init()
328 /* Allocate tracking for SVM-enabled VMM. */ in nouveau_svmm_init()
330 return -ENOMEM; in nouveau_svmm_init()
331 svmm->vmm = &cli->svm; in nouveau_svmm_init()
332 svmm->unmanaged.start = args->unmanaged_addr; in nouveau_svmm_init()
333 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size; in nouveau_svmm_init()
334 mutex_init(&svmm->mutex); in nouveau_svmm_init()
337 mutex_lock(&cli->mutex); in nouveau_svmm_init()
338 if (cli->svm.cli) { in nouveau_svmm_init()
339 ret = -EBUSY; in nouveau_svmm_init()
349 ret = nvif_vmm_ctor(&cli->mmu, "svmVmm", in nouveau_svmm_init()
350 cli->vmm.vmm.object.oclass, MANAGED, in nouveau_svmm_init()
351 args->unmanaged_addr, args->unmanaged_size, in nouveau_svmm_init()
354 }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm); in nouveau_svmm_init()
358 mmap_write_lock(current->mm); in nouveau_svmm_init()
359 svmm->notifier.ops = &nouveau_mn_ops; in nouveau_svmm_init()
360 ret = __mmu_notifier_register(&svmm->notifier, current->mm); in nouveau_svmm_init()
365 cli->svm.svmm = svmm; in nouveau_svmm_init()
366 cli->svm.cli = cli; in nouveau_svmm_init()
367 mmap_write_unlock(current->mm); in nouveau_svmm_init()
368 mutex_unlock(&cli->mutex); in nouveau_svmm_init()
372 mmap_write_unlock(current->mm); in nouveau_svmm_init()
374 mutex_unlock(&cli->mutex); in nouveau_svmm_init()
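
nouveau_svmm_init() is what the bind path above depends on: it allocates the svmm tracking structure, records the unmanaged window (the slice of GPU VA space left for regular BO mappings rather than mirrored CPU addresses), replaces the client VMM with a MANAGED one via nvif_vmm_ctor(), and registers an MMU notifier on current->mm. A minimal userspace sketch of the enabling ioctl, assuming the drm_nouveau_svm_init layout from include/uapi/drm/nouveau_drm.h:

/* Sketch: enable SVM for this DRM client; must precede any SVM bind.
 * Per the checks above, the kernel returns -ENOSYS when the device has no
 * SVM support and -EBUSY when SVM is already initialised for this client. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static int svm_enable(int drm_fd, uint64_t unmanaged_addr,
		      uint64_t unmanaged_size)
{
	struct drm_nouveau_svm_init init = {
		.unmanaged_addr = unmanaged_addr,
		.unmanaged_size = unmanaged_size,
	};

	return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &init);
}
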
384 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object, in nouveau_svm_fault_replay()
400 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object, in nouveau_svm_fault_cancel()
414 nouveau_svm_fault_cancel(svm, fault->inst, in nouveau_svm_fault_cancel_fault()
415 fault->hub, in nouveau_svm_fault_cancel_fault()
416 fault->gpc, in nouveau_svm_fault_cancel_fault()
417 fault->client); in nouveau_svm_fault_cancel_fault()
434 return -1; in nouveau_svm_fault_priority()
444 if ((ret = (s64)fa->inst - fb->inst)) in nouveau_svm_fault_cmp()
446 if ((ret = (s64)fa->addr - fb->addr)) in nouveau_svm_fault_cmp()
448 return nouveau_svm_fault_priority(fa->access) - in nouveau_svm_fault_cmp()
449 nouveau_svm_fault_priority(fb->access); in nouveau_svm_fault_cmp()
456 struct nvif_object *memory = &buffer->object; in nouveau_svm_fault_cache()
477 if (!buffer->fault[buffer->fault_nr]) { in nouveau_svm_fault_cache()
483 buffer->fault[buffer->fault_nr] = fault; in nouveau_svm_fault_cache()
486 fault = buffer->fault[buffer->fault_nr++]; in nouveau_svm_fault_cache()
487 fault->inst = inst; in nouveau_svm_fault_cache()
488 fault->addr = (u64)addrhi << 32 | addrlo; in nouveau_svm_fault_cache()
489 fault->time = (u64)timehi << 32 | timelo; in nouveau_svm_fault_cache()
490 fault->engine = engine; in nouveau_svm_fault_cache()
491 fault->gpc = gpc; in nouveau_svm_fault_cache()
492 fault->hub = hub; in nouveau_svm_fault_cache()
493 fault->access = (info & 0x000f0000) >> 16; in nouveau_svm_fault_cache()
494 fault->client = client; in nouveau_svm_fault_cache()
495 fault->fault = (info & 0x0000001f); in nouveau_svm_fault_cache()
498 fault->inst, fault->addr, fault->access); in nouveau_svm_fault_cache()
513 if (range->event == MMU_NOTIFY_EXCLUSIVE && in nouveau_svm_range_invalidate()
514 range->owner == sn->svmm->vmm->cli->drm->dev) in nouveau_svm_range_invalidate()
518 * serializes the update to mni->invalidate_seq done by caller and in nouveau_svm_range_invalidate()
525 mutex_lock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
526 else if (!mutex_trylock(&sn->svmm->mutex)) in nouveau_svm_range_invalidate()
529 mutex_unlock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
550 if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) { in nouveau_hmm_convert_pfn()
551 args->p.phys[0] = 0; in nouveau_hmm_convert_pfn()
555 page = hmm_pfn_to_page(range->hmm_pfns[0]); in nouveau_hmm_convert_pfn()
557 * Only map compound pages to the GPU if the CPU is also mapping the in nouveau_hmm_convert_pfn()
558 * page as a compound page. Otherwise, the PTE protections might not be in nouveau_hmm_convert_pfn()
559 * consistent (e.g., CPU only maps part of a compound page). in nouveau_hmm_convert_pfn()
561 * CPU mapping (e.g., a PUD sized compound page partially mapped with in nouveau_hmm_convert_pfn()
564 if (hmm_pfn_to_map_order(range->hmm_pfns[0])) { in nouveau_hmm_convert_pfn()
565 unsigned long addr = args->p.addr; in nouveau_hmm_convert_pfn()
567 args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) + in nouveau_hmm_convert_pfn()
569 args->p.size = 1UL << args->p.page; in nouveau_hmm_convert_pfn()
570 args->p.addr &= ~(args->p.size - 1); in nouveau_hmm_convert_pfn()
571 page -= (addr - args->p.addr) >> PAGE_SHIFT; in nouveau_hmm_convert_pfn()
574 args->p.phys[0] = nouveau_dmem_page_addr(page) | in nouveau_hmm_convert_pfn()
578 args->p.phys[0] = page_to_phys(page) | in nouveau_hmm_convert_pfn()
581 if (range->hmm_pfns[0] & HMM_PFN_WRITE) in nouveau_hmm_convert_pfn()
582 args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W; in nouveau_hmm_convert_pfn()
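
This is where the search terms meet: nouveau_hmm_convert_pfn() only uses a large GPU page when hmm_pfn_to_map_order() says the CPU itself maps the page at that order (so PTE protections stay consistent across the whole block), and it encodes either a device-private VRAM address via nouveau_dmem_page_addr() or a system address via page_to_phys(). A small sketch of the same geometry math, with a made-up helper name; order 9 on 4KiB base pages gives a 2MiB GPU mapping:

/* Hedged sketch of the map-order math above: with a CPU mapping of order N
 * (e.g. N = 9 for a 2MiB THP on 4KiB base pages), the GPU PTE covers the
 * same block, so both the VA and the struct page pointer are pulled back to
 * the head of that aligned block. */
#include <linux/hmm.h>
#include <linux/mm.h>

static struct page *gpu_map_geometry(unsigned long hmm_pfn, u64 *addr,
				     u64 *size, u8 *page_shift)
{
	struct page *page = hmm_pfn_to_page(hmm_pfn);
	u64 fault_addr = *addr;

	*page_shift = hmm_pfn_to_map_order(hmm_pfn) + PAGE_SHIFT; /* 9+12=21 */
	*size = 1ULL << *page_shift;		/* 2MiB for order 9 */
	*addr = fault_addr & ~(*size - 1);	/* align the GPU VA down */
	/* step back to the struct page that backs the aligned VA */
	return page - ((fault_addr - *addr) >> PAGE_SHIFT);
}
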
592 struct mm_struct *mm = svmm->notifier.mm; in nouveau_atomic_range_fault()
595 unsigned long start = args->p.addr; in nouveau_atomic_range_fault()
599 ret = mmu_interval_notifier_insert(&notifier->notifier, mm, in nouveau_atomic_range_fault()
600 args->p.addr, args->p.size, in nouveau_atomic_range_fault()
607 ret = -EBUSY; in nouveau_atomic_range_fault()
611 notifier_seq = mmu_interval_read_begin(&notifier->notifier); in nouveau_atomic_range_fault()
614 &page, drm->dev); in nouveau_atomic_range_fault()
617 ret = -EINVAL; in nouveau_atomic_range_fault()
622 mutex_lock(&svmm->mutex); in nouveau_atomic_range_fault()
623 if (!mmu_interval_read_retry(&notifier->notifier, in nouveau_atomic_range_fault()
626 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
633 args->p.page = 12; in nouveau_atomic_range_fault()
634 args->p.size = PAGE_SIZE; in nouveau_atomic_range_fault()
635 args->p.addr = start; in nouveau_atomic_range_fault()
636 args->p.phys[0] = page_to_phys(page) | in nouveau_atomic_range_fault()
642 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_atomic_range_fault()
643 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
649 mmu_interval_notifier_remove(&notifier->notifier); in nouveau_atomic_range_fault()
664 .notifier = &notifier->notifier, in nouveau_range_fault()
667 .dev_private_owner = drm->dev, in nouveau_range_fault()
669 struct mm_struct *mm = svmm->notifier.mm; in nouveau_range_fault()
672 ret = mmu_interval_notifier_insert(&notifier->notifier, mm, in nouveau_range_fault()
673 args->p.addr, args->p.size, in nouveau_range_fault()
678 range.start = notifier->notifier.interval_tree.start; in nouveau_range_fault()
679 range.end = notifier->notifier.interval_tree.last + 1; in nouveau_range_fault()
683 ret = -EBUSY; in nouveau_range_fault()
692 if (ret == -EBUSY) in nouveau_range_fault()
697 mutex_lock(&svmm->mutex); in nouveau_range_fault()
700 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
708 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_range_fault()
709 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
712 mmu_interval_notifier_remove(&notifier->notifier); in nouveau_range_fault()
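
nouveau_range_fault() and nouveau_atomic_range_fault() both follow the documented mmu_interval_notifier + hmm_range_fault() retry pattern (Documentation/mm/hmm.rst): sample a sequence with mmu_interval_read_begin(), fault the CPU page tables, then take svmm->mutex and only program the GPU if mmu_interval_read_retry() shows no invalidation raced with the walk; nouveau_svm_range_invalidate() above is the matching invalidate callback that only bumps the sequence under that same lock. The real functions also bound the retries with a timeout. A condensed sketch of the pattern, with the driver-specific work reduced to a comment:

/* Assumes the caller holds a reference on mm, has already inserted mni with
 * mmu_interval_notifier_insert(), and has filled in range (notifier,
 * start/end, hmm_pfns, default_flags, dev_private_owner). */
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static int fault_and_program(struct mmu_interval_notifier *mni,
			     struct hmm_range *range,
			     struct mutex *driver_lock)
{
	struct mm_struct *mm = mni->mm;
	int ret;

	do {
		range->notifier_seq = mmu_interval_read_begin(mni);

		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* raced, walk again */
			return ret;
		}

		mutex_lock(driver_lock);
		if (mmu_interval_read_retry(mni, range->notifier_seq)) {
			mutex_unlock(driver_lock);
			continue;	/* invalidated, redo the walk */
		}
		break;
	} while (true);

	/* range->hmm_pfns[] is stable while driver_lock is held: this is
	 * where the GPU page tables get programmed (the nvif_object_ioctl()
	 * call above). */
	mutex_unlock(driver_lock);
	return 0;
}
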
721 struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]); in nouveau_svm_fault()
722 struct nvif_object *device = &svm->drm->client.device.object; in nouveau_svm_fault() local
737 if (buffer->get == buffer->put) { in nouveau_svm_fault()
738 buffer->put = nvif_rd32(device, buffer->putaddr); in nouveau_svm_fault()
739 buffer->get = nvif_rd32(device, buffer->getaddr); in nouveau_svm_fault()
740 if (buffer->get == buffer->put) in nouveau_svm_fault()
743 buffer->fault_nr = 0; in nouveau_svm_fault()
745 SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put); in nouveau_svm_fault()
746 while (buffer->get != buffer->put) { in nouveau_svm_fault()
747 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20); in nouveau_svm_fault()
748 if (++buffer->get == buffer->entries) in nouveau_svm_fault()
749 buffer->get = 0; in nouveau_svm_fault()
751 nvif_wr32(device, buffer->getaddr, buffer->get); in nouveau_svm_fault()
752 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr); in nouveau_svm_fault()
758 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault), in nouveau_svm_fault()
762 mutex_lock(&svm->mutex); in nouveau_svm_fault()
763 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) { in nouveau_svm_fault()
764 if (!svmm || buffer->fault[fi]->inst != inst) { in nouveau_svm_fault()
766 nouveau_ivmm_find(svm, buffer->fault[fi]->inst); in nouveau_svm_fault()
767 svmm = ivmm ? ivmm->svmm : NULL; in nouveau_svm_fault()
768 inst = buffer->fault[fi]->inst; in nouveau_svm_fault()
769 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm); in nouveau_svm_fault()
771 buffer->fault[fi]->svmm = svmm; in nouveau_svm_fault()
773 mutex_unlock(&svm->mutex); in nouveau_svm_fault()
782 for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { in nouveau_svm_fault()
786 /* Cancel any faults from non-SVM channels. */ in nouveau_svm_fault()
787 if (!(svmm = buffer->fault[fi]->svmm)) { in nouveau_svm_fault()
788 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
791 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
796 start = buffer->fault[fi]->addr; in nouveau_svm_fault()
798 if (start < svmm->unmanaged.limit) in nouveau_svm_fault()
799 limit = min_t(u64, limit, svmm->unmanaged.start); in nouveau_svm_fault()
802 * Prepare the GPU-side update of all pages within the in nouveau_svm_fault()
813 switch (buffer->fault[fi]->access) { in nouveau_svm_fault()
828 mm = svmm->notifier.mm; in nouveau_svm_fault()
830 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
836 ret = nouveau_atomic_range_fault(svmm, svm->drm, in nouveau_svm_fault()
840 ret = nouveau_range_fault(svmm, svm->drm, &args.i, in nouveau_svm_fault()
846 for (fn = fi; ++fn < buffer->fault_nr; ) { in nouveau_svm_fault()
856 if (buffer->fault[fn]->svmm != svmm || in nouveau_svm_fault()
857 buffer->fault[fn]->addr >= limit || in nouveau_svm_fault()
858 (buffer->fault[fi]->access == FAULT_ACCESS_READ && in nouveau_svm_fault()
860 (buffer->fault[fi]->access != FAULT_ACCESS_READ && in nouveau_svm_fault()
861 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && in nouveau_svm_fault()
863 (buffer->fault[fi]->access != FAULT_ACCESS_READ && in nouveau_svm_fault()
864 buffer->fault[fi]->access != FAULT_ACCESS_WRITE && in nouveau_svm_fault()
865 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && in nouveau_svm_fault()
874 buffer->fault[fi++]; in nouveau_svm_fault()
892 schedule_work(&buffer->work); in nouveau_svm_event()
911 args->i.type = NVIF_IOCTL_V0_MTHD; in nouveau_pfns_alloc()
912 args->m.method = NVIF_VMM_V0_PFNMAP; in nouveau_pfns_alloc()
913 args->p.page = PAGE_SHIFT; in nouveau_pfns_alloc()
915 return args->p.phys; in nouveau_pfns_alloc()
932 args->p.addr = addr; in nouveau_pfns_map()
933 args->p.size = npages << PAGE_SHIFT; in nouveau_pfns_map()
935 mutex_lock(&svmm->mutex); in nouveau_pfns_map()
937 nvif_object_ioctl(&svmm->vmm->vmm.object, args, in nouveau_pfns_map()
940 mutex_unlock(&svmm->mutex); in nouveau_pfns_map()
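
nouveau_pfns_alloc()/nouveau_pfns_map() are the helpers the migration code in nouveau_dmem.c uses to push a whole batch of migrated pages into the GPU page tables with a single NVIF_VMM_V0_PFNMAP call; each entry of the returned phys[] array describes one PAGE_SIZE page. A hedged caller-side sketch, assuming the helper signatures from nouveau_svm.h/nouveau_dmem.h and the NVIF_VMM_PFNMAP_V0_* flag names from nvif/if000c.h; locking is handled inside nouveau_pfns_map() itself, as shown above:

/* Sketch (driver context): map npages of freshly migrated VRAM pages for
 * svmm starting at CPU address addr.  nouveau_dmem_page_addr() is assumed
 * to give the device-private address of each page. */
static int map_migrated_range(struct nouveau_svmm *svmm, struct mm_struct *mm,
			      unsigned long addr, struct page **pages,
			      unsigned long npages)
{
	u64 *pfns = nouveau_pfns_alloc(npages);
	unsigned long i;

	if (!pfns)
		return -ENOMEM;

	for (i = 0; i < npages; i++)
		pfns[i] = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
			  NVIF_VMM_PFNMAP_V0_W |
			  ((nouveau_dmem_page_addr(pages[i]) >> PAGE_SHIFT)
			   << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);

	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
	nouveau_pfns_free(pfns);
	return 0;
}
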
946 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_fini()
948 nvif_event_block(&buffer->notify); in nouveau_svm_fault_buffer_fini()
949 flush_work(&buffer->work); in nouveau_svm_fault_buffer_fini()
955 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_init()
956 struct nvif_object *device = &svm->drm->client.device.object; in nouveau_svm_fault_buffer_init() local
958 buffer->get = nvif_rd32(device, buffer->getaddr); in nouveau_svm_fault_buffer_init()
959 buffer->put = nvif_rd32(device, buffer->putaddr); in nouveau_svm_fault_buffer_init()
960 SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put); in nouveau_svm_fault_buffer_init()
962 return nvif_event_allow(&buffer->notify); in nouveau_svm_fault_buffer_init()
968 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_dtor()
971 if (!nvif_object_constructed(&buffer->object)) in nouveau_svm_fault_buffer_dtor()
976 if (buffer->fault) { in nouveau_svm_fault_buffer_dtor()
977 for (i = 0; buffer->fault[i] && i < buffer->entries; i++) in nouveau_svm_fault_buffer_dtor()
978 kfree(buffer->fault[i]); in nouveau_svm_fault_buffer_dtor()
979 kvfree(buffer->fault); in nouveau_svm_fault_buffer_dtor()
982 nvif_event_dtor(&buffer->notify); in nouveau_svm_fault_buffer_dtor()
983 nvif_object_dtor(&buffer->object); in nouveau_svm_fault_buffer_dtor()
989 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id]; in nouveau_svm_fault_buffer_ctor()
990 struct nouveau_drm *drm = svm->drm; in nouveau_svm_fault_buffer_ctor()
991 struct nvif_object *device = &drm->client.device.object; in nouveau_svm_fault_buffer_ctor() local
995 buffer->id = id; in nouveau_svm_fault_buffer_ctor()
997 ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args, in nouveau_svm_fault_buffer_ctor()
998 sizeof(args), &buffer->object); in nouveau_svm_fault_buffer_ctor()
1004 nvif_object_map(&buffer->object, NULL, 0); in nouveau_svm_fault_buffer_ctor()
1005 buffer->entries = args.entries; in nouveau_svm_fault_buffer_ctor()
1006 buffer->getaddr = args.get; in nouveau_svm_fault_buffer_ctor()
1007 buffer->putaddr = args.put; in nouveau_svm_fault_buffer_ctor()
1008 INIT_WORK(&buffer->work, nouveau_svm_fault); in nouveau_svm_fault_buffer_ctor()
1010 ret = nvif_event_ctor(&buffer->object, "svmFault", id, nouveau_svm_event, true, NULL, 0, in nouveau_svm_fault_buffer_ctor()
1011 &buffer->notify); in nouveau_svm_fault_buffer_ctor()
1015 buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); in nouveau_svm_fault_buffer_ctor()
1016 if (!buffer->fault) in nouveau_svm_fault_buffer_ctor()
1017 return -ENOMEM; in nouveau_svm_fault_buffer_ctor()
1025 struct nouveau_svm *svm = drm->svm; in nouveau_svm_resume()
1033 struct nouveau_svm *svm = drm->svm; in nouveau_svm_suspend()
1041 struct nouveau_svm *svm = drm->svm; in nouveau_svm_fini()
1044 kfree(drm->svm); in nouveau_svm_fini()
1045 drm->svm = NULL; in nouveau_svm_fini()
1064 if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL) in nouveau_svm_init()
1067 drm->svm = svm = kzalloc(struct_size(drm->svm, buffer, 1), GFP_KERNEL); in nouveau_svm_init()
1068 if (!drm->svm) in nouveau_svm_init()
1071 drm->svm->drm = drm; in nouveau_svm_init()
1072 mutex_init(&drm->svm->mutex); in nouveau_svm_init()
1073 INIT_LIST_HEAD(&drm->svm->inst); in nouveau_svm_init()
1075 ret = nvif_mclass(&drm->client.device.object, buffers); in nouveau_svm_init()