Lines matching +full:slot +full:-size (virt/kvm/guest_memfd.c)

1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/backing-dev.h>
17 * folio_file_pfn - like folio_file_page, but return a pfn.
25 return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1)); in folio_file_pfn()
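
The mask on line 25 relies on folio sizes being powers of two, so "index & (folio_nr_pages(folio) - 1)" is a cheap "index % nr_pages". A standalone illustration of the arithmetic with hypothetical values (plain C, not kernel source):

	/* A 2MB folio holds 512 4KB pages, so nr_pages = 512 = 1 << 9. */
	unsigned long nr_pages = 512;
	unsigned long index = 1027;                  /* page cache index */
	unsigned long off = index & (nr_pages - 1);  /* 1027 % 512 == 3 */
	/* folio_file_pfn() returns folio_pfn(folio) + off: the folio's 4th page. */
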
28 static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot, in __kvm_gmem_prepare_folio()
33 gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; in __kvm_gmem_prepare_folio()
52 * The folio must be locked and the gfn must be contained in @slot.
54 * leaking host data and the up-to-date flag is set.
56 static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_gmem_prepare_folio()
75 * huge page table entries for GPA->HPA mapping. in kvm_gmem_prepare_folio()
80 WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio))); in kvm_gmem_prepare_folio()
81 index = gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_prepare_folio()
83 r = __kvm_gmem_prepare_folio(kvm, slot, index, folio); in kvm_gmem_prepare_folio()
92 * setting the up-to-date flag before the memory is mapped into the guest.
94 * up-to-date until it's removed.
102 return filemap_grab_folio(inode->i_mapping, index); in kvm_gmem_get_folio()
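
Per the comments at lines 52-54 and 92-94, a folio fresh from filemap_grab_folio() is not yet up-to-date; the prepare path zeroes it before the guest can see it. A minimal sketch of that pattern (a hedged reconstruction, the exact placement of the zeroing differs between kernel versions):

	folio = kvm_gmem_get_folio(inode, index);   /* returns a locked folio */
	if (!folio_test_uptodate(folio)) {
		unsigned long i;

		/* Zero every page so no stale host data leaks into the guest. */
		for (i = 0; i < folio_nr_pages(folio); i++)
			clear_highpage(folio_page(folio, i));
		folio_mark_uptodate(folio);
	}
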
109 struct kvm_memory_slot *slot; in kvm_gmem_invalidate_begin()
110 struct kvm *kvm = gmem->kvm; in kvm_gmem_invalidate_begin()
113 xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) { in kvm_gmem_invalidate_begin()
114 pgoff_t pgoff = slot->gmem.pgoff; in kvm_gmem_invalidate_begin()
117 .start = slot->base_gfn + max(pgoff, start) - pgoff, in kvm_gmem_invalidate_begin()
118 .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff, in kvm_gmem_invalidate_begin()
119 .slot = slot, in kvm_gmem_invalidate_begin()
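
The .start/.end initializers at lines 117-118 intersect the invalidated file range [start, end) with the slot's binding [pgoff, pgoff + npages) and rebase the result into gfn space. Working the same computation with hypothetical numbers (using the kernel's max()/min()):

	/* Binding: file pages [16, 48) back gfns [0x100, 0x120). */
	pgoff_t pgoff = 16, start = 8, end = 24;    /* punched file-page range */
	unsigned long npages = 32;
	gfn_t base_gfn = 0x100;

	gfn_t gfn_start = base_gfn + max(pgoff, start) - pgoff;      /* 0x100 */
	gfn_t gfn_end = base_gfn + min(pgoff + npages, end) - pgoff; /* 0x108 */
	/* Only gfns [0x100, 0x108) overlap the punched range for this slot. */
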
145 struct kvm *kvm = gmem->kvm; in kvm_gmem_invalidate_end()
147 if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) { in kvm_gmem_invalidate_end()
156 struct list_head *gmem_list = &inode->i_mapping->i_private_list; in kvm_gmem_punch_hole()
165 filemap_invalidate_lock(inode->i_mapping); in kvm_gmem_punch_hole()
170 truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1); in kvm_gmem_punch_hole()
175 filemap_invalidate_unlock(inode->i_mapping); in kvm_gmem_punch_hole()
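
Read together, lines 156-175 suggest the punch-hole sequence: take the invalidate lock, run invalidate_begin for every gmem instance bound to this inode, truncate the page cache, then invalidate_end. A sketch of that flow (not the verbatim source):

	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;

	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);
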
182 struct address_space *mapping = inode->i_mapping; in kvm_gmem_allocate()
188 return -EINVAL; in kvm_gmem_allocate()
200 r = -EINTR; in kvm_gmem_allocate()
215 /* 64-bit only, wrapping the index should be impossible. */ in kvm_gmem_allocate()
233 return -EOPNOTSUPP; in kvm_gmem_fallocate()
236 return -EOPNOTSUPP; in kvm_gmem_fallocate()
239 return -EINVAL; in kvm_gmem_fallocate()
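
Lines 233-239 only admit FALLOC_FL_PUNCH_HOLE (which fallocate(2) requires to be paired with FALLOC_FL_KEEP_SIZE) and plain allocation, with page-aligned offset and length. From userspace, discarding a range of guest-private memory would look roughly like this sketch:

	#define _GNU_SOURCE
	#include <fcntl.h>

	/* gmem_fd is a guest_memfd descriptor; offset/len must be page-aligned. */
	int discard_range(int gmem_fd, off_t offset, off_t len)
	{
		return fallocate(gmem_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				 offset, len);
	}
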
253 struct kvm_gmem *gmem = file->private_data; in kvm_gmem_release()
254 struct kvm_memory_slot *slot; in kvm_gmem_release()
255 struct kvm *kvm = gmem->kvm; in kvm_gmem_release()
261 * dereferencing the slot for existing bindings needs to be protected in kvm_gmem_release()
271 mutex_lock(&kvm->slots_lock); in kvm_gmem_release()
273 filemap_invalidate_lock(inode->i_mapping); in kvm_gmem_release()
275 xa_for_each(&gmem->bindings, index, slot) in kvm_gmem_release()
276 WRITE_ONCE(slot->gmem.file, NULL); in kvm_gmem_release()
279 * All in-flight operations are gone and new bindings can be created. in kvm_gmem_release()
283 kvm_gmem_invalidate_begin(gmem, 0, -1ul); in kvm_gmem_release()
284 kvm_gmem_invalidate_end(gmem, 0, -1ul); in kvm_gmem_release()
286 list_del(&gmem->entry); in kvm_gmem_release()
288 filemap_invalidate_unlock(inode->i_mapping); in kvm_gmem_release()
290 mutex_unlock(&kvm->slots_lock); in kvm_gmem_release()
292 xa_destroy(&gmem->bindings); in kvm_gmem_release()
300 static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot) in kvm_gmem_get_file()
303 * Do not return slot->gmem.file if it has already been closed; in kvm_gmem_get_file()
305 * kvm_gmem_release() clears slot->gmem.file. in kvm_gmem_get_file()
307 return get_file_active(&slot->gmem.file); in kvm_gmem_get_file()
310 static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gmem_get_index()
312 return gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_get_index()
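
kvm_gmem_get_index() is the inverse of the gfn computation on line 33: both are linear offsets from the (base_gfn, pgoff) anchor of the binding. With hypothetical values:

	/* Binding anchor: slot->base_gfn = 0x100, slot->gmem.pgoff = 16. */
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff; /* gfn 0x104 -> file page 20 */
	gfn_t gfn2 = slot->base_gfn + index - slot->gmem.pgoff;  /* file page 20 -> gfn 0x104 */
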
331 return -EINVAL; in kvm_gmem_migrate_folio()
336 struct list_head *gmem_list = &mapping->i_private_list; in kvm_gmem_error_folio()
342 start = folio->index; in kvm_gmem_error_folio()
352 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON, in kvm_gmem_error_folio()
389 struct inode *inode = path->dentry->d_inode; in kvm_gmem_getattr()
398 return -EINVAL; in kvm_gmem_setattr()
405 static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) in __kvm_gmem_create()
407 const char *anon_name = "[kvm-gmem]"; in __kvm_gmem_create()
419 err = -ENOMEM; in __kvm_gmem_create()
430 file->f_flags |= O_LARGEFILE; in __kvm_gmem_create()
432 inode = file->f_inode; in __kvm_gmem_create()
433 WARN_ON(file->f_mapping != inode->i_mapping); in __kvm_gmem_create()
435 inode->i_private = (void *)(unsigned long)flags; in __kvm_gmem_create()
436 inode->i_op = &kvm_gmem_iops; in __kvm_gmem_create()
437 inode->i_mapping->a_ops = &kvm_gmem_aops; in __kvm_gmem_create()
438 inode->i_mode |= S_IFREG; in __kvm_gmem_create()
439 inode->i_size = size; in __kvm_gmem_create()
440 mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); in __kvm_gmem_create()
441 mapping_set_inaccessible(inode->i_mapping); in __kvm_gmem_create()
443 WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping)); in __kvm_gmem_create()
446 gmem->kvm = kvm; in __kvm_gmem_create()
447 xa_init(&gmem->bindings); in __kvm_gmem_create()
448 list_add(&gmem->entry, &inode->i_mapping->i_private_list); in __kvm_gmem_create()
462 loff_t size = args->size; in kvm_gmem_create()
463 u64 flags = args->flags; in kvm_gmem_create()
467 return -EINVAL; in kvm_gmem_create()
469 if (size <= 0 || !PAGE_ALIGNED(size)) in kvm_gmem_create()
470 return -EINVAL; in kvm_gmem_create()
472 return __kvm_gmem_create(kvm, size, flags); in kvm_gmem_create()
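
Userspace reaches kvm_gmem_create() through the KVM_CREATE_GUEST_MEMFD vm ioctl, which returns the new file descriptor. A minimal sketch (vm_fd is an already-created KVM VM fd):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	struct kvm_create_guest_memfd gmem = {
		.size  = 2UL << 20,  /* must be non-zero and page-aligned */
		.flags = 0,          /* unknown flags hit the -EINVAL checks above */
	};
	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
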
475 int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_gmem_bind()
478 loff_t size = slot->npages << PAGE_SHIFT; in kvm_gmem_bind()
483 int r = -EINVAL; in kvm_gmem_bind()
485 BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff)); in kvm_gmem_bind()
489 return -EBADF; in kvm_gmem_bind()
491 if (file->f_op != &kvm_gmem_fops) in kvm_gmem_bind()
494 gmem = file->private_data; in kvm_gmem_bind()
495 if (gmem->kvm != kvm) in kvm_gmem_bind()
501 offset + size > i_size_read(inode)) in kvm_gmem_bind()
504 filemap_invalidate_lock(inode->i_mapping); in kvm_gmem_bind()
507 end = start + slot->npages; in kvm_gmem_bind()
509 if (!xa_empty(&gmem->bindings) && in kvm_gmem_bind()
510 xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) { in kvm_gmem_bind()
511 filemap_invalidate_unlock(inode->i_mapping); in kvm_gmem_bind()
520 WRITE_ONCE(slot->gmem.file, file); in kvm_gmem_bind()
521 slot->gmem.pgoff = start; in kvm_gmem_bind()
523 xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL); in kvm_gmem_bind()
524 filemap_invalidate_unlock(inode->i_mapping); in kvm_gmem_bind()
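
kvm_gmem_bind() is driven from KVM_SET_USER_MEMORY_REGION2: a memslot carrying KVM_MEM_GUEST_MEMFD names the guest_memfd and a page-aligned offset into it. A userspace sketch (vm_fd and gmem_fd as above; shared_buf, the shared-memory backing, is illustrative):

	struct kvm_userspace_memory_region2 region = {
		.slot               = 0,
		.flags              = KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr    = 0x100000,
		.memory_size        = 2UL << 20,
		.userspace_addr     = (__u64)(uintptr_t)shared_buf,
		.guest_memfd        = gmem_fd,
		.guest_memfd_offset = 0, /* page-aligned; offset + size must fit the inode */
	};
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
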
537 void kvm_gmem_unbind(struct kvm_memory_slot *slot) in kvm_gmem_unbind()
539 unsigned long start = slot->gmem.pgoff; in kvm_gmem_unbind()
540 unsigned long end = start + slot->npages; in kvm_gmem_unbind()
548 file = kvm_gmem_get_file(slot); in kvm_gmem_unbind()
552 gmem = file->private_data; in kvm_gmem_unbind()
554 filemap_invalidate_lock(file->f_mapping); in kvm_gmem_unbind()
555 xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL); in kvm_gmem_unbind()
558 * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn() in kvm_gmem_unbind()
561 WRITE_ONCE(slot->gmem.file, NULL); in kvm_gmem_unbind()
562 filemap_invalidate_unlock(file->f_mapping); in kvm_gmem_unbind()
569 struct kvm_memory_slot *slot, in __kvm_gmem_get_pfn()
573 struct file *gmem_file = READ_ONCE(slot->gmem.file); in __kvm_gmem_get_pfn()
574 struct kvm_gmem *gmem = file->private_data; in __kvm_gmem_get_pfn()
579 return ERR_PTR(-EFAULT); in __kvm_gmem_get_pfn()
582 gmem = file->private_data; in __kvm_gmem_get_pfn()
583 if (xa_load(&gmem->bindings, index) != slot) { in __kvm_gmem_get_pfn()
584 WARN_ON_ONCE(xa_load(&gmem->bindings, index)); in __kvm_gmem_get_pfn()
585 return ERR_PTR(-EIO); in __kvm_gmem_get_pfn()
595 return ERR_PTR(-EHWPOISON); in __kvm_gmem_get_pfn()
606 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_gmem_get_pfn()
610 pgoff_t index = kvm_gmem_get_index(slot, gfn); in kvm_gmem_get_pfn()
611 struct file *file = kvm_gmem_get_file(slot); in kvm_gmem_get_pfn()
617 return -EFAULT; in kvm_gmem_get_pfn()
619 folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order); in kvm_gmem_get_pfn()
626 r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio); in kvm_gmem_get_pfn()
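
A consumer such as a page-fault handler uses kvm_gmem_get_pfn() roughly as below (a sketch only; the exact signature has changed across kernel versions, and the fault-> names are illustrative):

	kvm_pfn_t pfn;
	int max_order, r;

	r = kvm_gmem_get_pfn(kvm, fault->slot, fault->gfn, &pfn, &max_order);
	if (r)
		return r;  /* -EFAULT if unbound, -EHWPOISON for a poisoned folio */

	/* Map up to 1 << max_order pages at pfn into the guest, then release. */
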
646 struct kvm_memory_slot *slot; in kvm_gmem_populate()
652 lockdep_assert_held(&kvm->slots_lock); in kvm_gmem_populate()
654 return -EINVAL; in kvm_gmem_populate()
656 slot = gfn_to_memslot(kvm, start_gfn); in kvm_gmem_populate()
657 if (!kvm_slot_can_be_private(slot)) in kvm_gmem_populate()
658 return -EINVAL; in kvm_gmem_populate()
660 file = kvm_gmem_get_file(slot); in kvm_gmem_populate()
662 return -EFAULT; in kvm_gmem_populate()
664 filemap_invalidate_lock(file->f_mapping); in kvm_gmem_populate()
666 npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages); in kvm_gmem_populate()
670 pgoff_t index = kvm_gmem_get_index(slot, gfn); in kvm_gmem_populate()
675 ret = -EINTR; in kvm_gmem_populate()
679 folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order); in kvm_gmem_populate()
688 ret = -EEXIST; in kvm_gmem_populate()
694 (npages - i) < (1 << max_order)); in kvm_gmem_populate()
696 ret = -EINVAL; in kvm_gmem_populate()
702 max_order--; in kvm_gmem_populate()
716 filemap_invalidate_unlock(file->f_mapping); in kvm_gmem_populate()
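
kvm_gmem_populate() grabs each folio, refuses folios that were already prepared (-EEXIST at line 688), shrinks max_order until the whole block is usable, and hands the pfn to a vendor callback, e.g. to encrypt and measure an initial guest image. A hypothetical callback in that style (the kvm_gmem_populate_cb signature is assumed from context):

	static int my_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				    void __user *src, int order, void *opaque)
	{
		/* Copy in and convert the 1 << order pages starting at pfn. */
		return 0;
	}

	ret = kvm_gmem_populate(kvm, start_gfn, src, npages,
				my_post_populate, NULL);
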