
Searched +full:vm +full:- +full:map (Results 1 – 25 of 475) sorted by relevance


/linux-6.14.4/drivers/gpu/drm/panthor/
panthor_mmu.c
1 // SPDX-License-Identifier: GPL-2.0 or MIT
16 #include <linux/dma-mapping.h>
20 #include <linux/io-pgtable.h>
42 * struct panthor_as_slot - Address space slot
45 /** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm; member
50 * struct panthor_mmu - MMU related data
59 * us to re-assign slots on-demand.
77 * We use this list to pick a VM to evict when all slots are
88 /** @vm: VMs management fields */
[all …]
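
The panthor excerpt above pairs a fixed set of address-space slots (each with an optional bound VM) with an LRU list used to pick a victim VM when every slot is taken. Below is a minimal userspace sketch of that bind/evict pattern; the toy_vm/toy_slot types and the array-based LRU are illustrative stand-ins, not the driver's actual structures.

#include <stddef.h>
#include <stdio.h>

#define NUM_SLOTS 4

struct toy_vm {
    int id;
    int slot;           /* index of the bound slot, or -1 */
};

struct toy_slot {
    struct toy_vm *vm;  /* VM bound to this slot, NULL if none */
};

static struct toy_slot slots[NUM_SLOTS];
static struct toy_vm *lru[64];   /* oldest first; toy stand-in for a list_head LRU */
static int lru_len;

/* Bind @vm to a slot, evicting the least recently used VM if all slots are busy. */
static int bind_vm(struct toy_vm *vm)
{
    for (int i = 0; i < NUM_SLOTS; i++) {
        if (!slots[i].vm) {
            slots[i].vm = vm;
            vm->slot = i;
            lru[lru_len++] = vm;
            return i;
        }
    }

    /* All slots used: evict the LRU entry and reuse its slot. */
    struct toy_vm *victim = lru[0];
    int slot = victim->slot;

    victim->slot = -1;
    for (int i = 1; i < lru_len; i++)
        lru[i - 1] = lru[i];
    lru_len--;

    slots[slot].vm = vm;
    vm->slot = slot;
    lru[lru_len++] = vm;
    return slot;
}

int main(void)
{
    struct toy_vm vms[6] = { {0, -1}, {1, -1}, {2, -1}, {3, -1}, {4, -1}, {5, -1} };

    for (int i = 0; i < 6; i++)
        printf("vm %d -> slot %d\n", vms[i].id, bind_vm(&vms[i]));
    return 0;
}
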
panthor_gem.h
1 /* SPDX-License-Identifier: GPL-2.0 or MIT */
11 #include <linux/iosys-map.h>
17 * struct panthor_gem_object - Driver specific GEM object.
24 * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
28 * different VM will fail.
31 * VM.
42 * called in a dma-signaling path, where we're not allowed to take
52 * struct panthor_kernel_bo - Kernel buffer object.
65 * @vm: VM this private buffer is attached to.
67 struct panthor_vm *vm; member
[all …]
panthor_gem.c
1 // SPDX-License-Identifier: GPL-2.0 or MIT
5 #include <linux/dma-buf.h>
6 #include <linux/dma-mapping.h>
19 struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem; in panthor_gem_free_object()
21 drm_gem_free_mmap_offset(&bo->base.base); in panthor_gem_free_object()
22 mutex_destroy(&bo->gpuva_list_lock); in panthor_gem_free_object()
23 drm_gem_shmem_free(&bo->base); in panthor_gem_free_object()
28 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
34 struct panthor_vm *vm; in panthor_kernel_bo_destroy() local
40 vm = bo->vm; in panthor_kernel_bo_destroy()
[all …]
/linux-6.14.4/drivers/irqchip/
irq-gic-v4.c
1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/irqchip/arm-gic-v4.h>
26 * agnostic actually means KVM-specific - what were you thinking?).
31 * - Any guest-visible VLPI is backed by a Linux interrupt (and a
37 * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
39 * - Guest INT/CLEAR commands are implemented through
42 * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
46 * confines the crap to a single location. And map/unmap really is
62 * interrupts which are delivered when a VLPI targeting a non-running
71 * - VMs (or rather the VM abstraction that matters to the GIC)
[all …]
/linux-6.14.4/drivers/gpu/drm/xe/
xe_migrate_doc.h
1 /* SPDX-License-Identifier: MIT */
14 * a migrate engine, and uses a special VM for all generated jobs.
16 * Special VM details
19 * The special VM is configured with a page structure where we can dynamically
20 * map BOs which need to be copied and cleared, dynamically map other VM's page
21 * table BOs for updates, and identity map the entire device's VRAM with 1 GB
37 * VM of the engine is the migrate VM.
39 * The first batch is used to update the migration VM page structure to point to
40 * the bind VM page table BOs which need to be updated. A physical page is
46 * has VRAM the bind VM page table BOs are in VRAM and the identity mapping can
[all …]
xe_vm.c
1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-fence-array.h>
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
44 return vm->gpuvm.r_obj; in xe_vm_obj()
48 * xe_vma_userptr_check_repin() - Advisory check for repin needed
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
57 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
61 return mmu_interval_check_retry(&uvma->userptr.notifier, in xe_vma_userptr_check_repin()
62 uvma->userptr.notifier_seq) ? in xe_vma_userptr_check_repin()
63 -EAGAIN : 0; in xe_vma_userptr_check_repin()
[all …]
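
xe_vma_userptr_check_repin() above is an advisory check: it compares the notifier sequence number sampled at pin time against the current one and reports -EAGAIN when a repin is recommended. Below is a minimal userspace analogue of that sequence-number check, using a toy fake_notifier type; the real driver uses mmu_interval_check_retry() under its own locking rules.

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for an mmu interval notifier: the invalidation path bumps @seq
 * every time the backing pages change. */
struct fake_notifier {
    unsigned long seq;
};

/* Advisory check: did the mapping change since we sampled @snapshot?
 * Returns 0 if the snapshot is still current, -EAGAIN if a repin is needed.
 * Like the driver helper, this is only a hint unless a lock is held around it. */
static int check_repin(const struct fake_notifier *n, unsigned long snapshot)
{
    return (n->seq != snapshot) ? -EAGAIN : 0;
}

int main(void)
{
    struct fake_notifier n = { .seq = 1 };
    unsigned long snap = n.seq;                     /* sample at pin time */

    printf("check: %d\n", check_repin(&n, snap));   /* 0: still valid */
    n.seq++;                                        /* simulated invalidation */
    printf("check: %d\n", check_repin(&n, snap));   /* -EAGAIN: repin needed */
    return 0;
}
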
xe_vm_types.h
1 /* SPDX-License-Identifier: MIT */
11 #include <linux/dma-resv.h>
46 /** struct xe_userptr - User pointer */
48 /** @invalidate_link: Link for the vm::userptr.invalidated list */
50 /** @userptr: link into VM repin list if userptr. */
62 /** @unmap_mutex: Mutex protecting dma-unmapping */
66 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
67 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
70 /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
83 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
[all …]
xe_pt.c
1 // SPDX-License-Identifier: MIT
6 #include <linux/dma-fence-array.h>
29 /** @children: Array of page-table child nodes */
31 /** @staging: Array of page-table staging nodes */
36 #define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
37 #define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
46 #define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)
56 return container_of(pt_dir->staging[index], struct xe_pt, base); in xe_pt_entry_staging()
59 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, in __xe_pt_empty_pte() argument
63 u16 pat_index = xe->pat.idx[XE_CACHE_WB]; in __xe_pt_empty_pte()
[all …]
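
The xe_pt.c excerpt derives XE_PT_HIGHEST_LEVEL from the size of a shift table. The sketch below shows what such a table encodes, assuming the common 4 KiB-page, 9-bits-per-level layout (shifts 12/21/30/39); the driver's actual xe_normal_pt_shifts[] values are not visible in the excerpt, so these numbers are an assumption.

#include <stdio.h>

/* Assumed 4 KiB pages with 9 address bits per level (x86-64-style layout);
 * the xe driver's real xe_normal_pt_shifts[] may differ. */
static const unsigned int pt_shifts[] = { 12, 21, 30, 39 };
#define HIGHEST_LEVEL (sizeof(pt_shifts) / sizeof(pt_shifts[0]) - 1)

int main(void)
{
    for (unsigned int lvl = 0; lvl <= HIGHEST_LEVEL; lvl++) {
        unsigned long long entry_span = 1ULL << pt_shifts[lvl];

        printf("level %u: shift %u, each entry maps %llu KiB\n",
               lvl, pt_shifts[lvl], entry_span >> 10);
    }
    return 0;
}
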
xe_lrc.c
1 // SPDX-License-Identifier: MIT
45 return gt_to_xe(lrc->fence_ctx.gt); in lrc_to_xe()
85 * The per-platform tables are u8-encoded in @data. Decode @data and set the
90 * [7]: create NOPs - number of NOPs is set in lower bits
120 const u32 base = hwe->mmio_base; in set_offsets()
141 xe_gt_assert(hwe->gt, count); in set_offsets()
154 } while (--count); in set_offsets()
576 if (xe_gt_has_indirect_ring_state(hwe->gt)) in set_context_control()
585 struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq; in set_memory_based_intr()
586 struct xe_device *xe = gt_to_xe(hwe->gt); in set_memory_based_intr()
[all …]
/linux-6.14.4/tools/testing/selftests/mm/
compaction_test.c
1 // SPDX-License-Identifier: GPL-2.0
25 void *map; member
32 char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'"; in read_memory_info()
37 return -1; in read_memory_info()
43 cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'"; in read_memory_info()
48 return -1; in read_memory_info()
62 fd = open("/proc/sys/vm/compact_unevictable_allowed", in prereq()
65 ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n", in prereq()
67 return -1; in prereq()
71 ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n", in prereq()
[all …]
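
The selftest above shells out to cat | grep to pull MemFree and Hugepagesize out of /proc/meminfo. Below is a self-contained alternative that parses the file directly in C; the meminfo_kb() helper is illustrative and not part of the selftest.

#include <stdio.h>
#include <string.h>

/* Read one field (e.g. "MemFree" or "Hugepagesize") from /proc/meminfo in kB.
 * Returns 0 on success, -1 on failure. */
static int meminfo_kb(const char *field, unsigned long long *out)
{
    char line[256];
    size_t len = strlen(field);
    FILE *fp = fopen("/proc/meminfo", "r");

    if (!fp)
        return -1;

    while (fgets(line, sizeof(line), fp)) {
        if (!strncmp(line, field, len) && line[len] == ':') {
            int ok = sscanf(line + len + 1, "%llu", out) == 1;

            fclose(fp);
            return ok ? 0 : -1;
        }
    }
    fclose(fp);
    return -1;
}

int main(void)
{
    unsigned long long memfree, hugepagesize;

    if (!meminfo_kb("MemFree", &memfree) && !meminfo_kb("Hugepagesize", &hugepagesize))
        printf("MemFree: %llu kB, Hugepagesize: %llu kB\n", memfree, hugepagesize);
    return 0;
}
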
/linux-6.14.4/drivers/virt/acrn/
mm.c
1 // SPDX-License-Identifier: GPL-2.0
19 static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region) in modify_region() argument
26 return -ENOMEM; in modify_region()
28 regions->vmid = vm->vmid; in modify_region()
29 regions->regions_num = 1; in modify_region()
30 regions->regions_gpa = virt_to_phys(region); in modify_region()
35 "Failed to set memory region for VM[%u]!\n", vm->vmid); in modify_region()
42 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
43 * @vm: User VM.
44 * @user_gpa: A GPA of User VM.
[all …]
/linux-6.14.4/Documentation/devicetree/bindings/hwmon/
moortec,mr75203.yaml
1 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
3 ---
5 $schema: http://devicetree.org/meta-schemas/core.yaml#
10 - Rahul Tanwar <[email protected]>
19 *) Temperature Sensor (TS) - used to monitor core temperature (e.g. mr74137).
20 *) Voltage Monitor (VM) - used to monitor voltage levels (e.g. mr74138).
21 *) Process Detector (PD) - used to assess silicon speed (e.g. mr74139).
22 *) Delay Chain - ring oscillator connected to the PD, used to measure IO
25 *) Pre Scaler - provides divide-by-X scaling of input voltage, which can then
26 be presented to the VM for measurement within its range (e.g. mr76006 -
[all …]
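
The binding above describes a pre-scaler that divides an input rail by X so the voltage monitor can measure it within its own range; software then multiplies the reading back. A tiny worked example follows, with illustrative numbers (1.2 V monitor range, 3.3 V rail, divide-by-4) that are not taken from the binding.

#include <stdio.h>

/* Undo the divide-by-X pre-scaling in software: the monitor reports the
 * divided voltage, so the real rail voltage is reading * divider. */
static long unscale_uv(long measured_uv, unsigned int prescaler_div)
{
    return measured_uv * (long)prescaler_div;
}

int main(void)
{
    /* 3.3 V rail divided by 4 -> 0.825 V seen by the VM, within a ~1.2 V range. */
    long measured_uv = 825000;

    printf("actual rail: %ld uV\n", unscale_uv(measured_uv, 4));  /* 3300000 uV */
    return 0;
}
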
/linux-6.14.4/tools/testing/selftests/kvm/x86/
nx_huge_pages_test.c
1 // SPDX-License-Identifier: GPL-2.0-only
41 * Exit the VM after each memory access so that the userspace component of the
42 * test can make assertions about the pages backing the VM.
72 static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m) in check_2m_page_count() argument
76 actual_pages_2m = vm_get_stat(vm, "pages_2m"); in check_2m_page_count()
83 static void check_split_count(struct kvm_vm *vm, int expected_splits) in check_split_count() argument
87 actual_splits = vm_get_stat(vm, "nx_lpage_splits"); in check_split_count()
101 ts.tv_nsec = (reclaim_wait_ms - (ts.tv_sec * 1000)) * 1000000; in wait_for_reclaim()
109 struct kvm_vm *vm; in run_test() local
114 vm = vm_create(1); in run_test()
[all …]
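
The ts.tv_nsec computation shown above (line 101, inside wait_for_reclaim()) splits a millisecond delay into seconds and nanoseconds. The same arithmetic is shown below as a standalone helper producing a struct timespec; how the test then consumes it is truncated in the excerpt, so the sleep usage is only assumed.

#include <stdio.h>
#include <time.h>

/* Split a millisecond wait into the second/nanosecond fields of a
 * struct timespec, mirroring the arithmetic visible in the excerpt. */
static struct timespec ms_to_timespec(long ms)
{
    struct timespec ts;

    ts.tv_sec = ms / 1000;
    ts.tv_nsec = (ms - ts.tv_sec * 1000) * 1000000L;
    return ts;
}

int main(void)
{
    struct timespec ts = ms_to_timespec(2500);

    printf("2500 ms -> %lld s + %ld ns\n", (long long)ts.tv_sec, ts.tv_nsec);
    /* Expected: 2 s + 500000000 ns */
    return 0;
}
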
vmx_dirty_log_test.c
1 // SPDX-License-Identifier: GPL-2.0
62 GUEST_ASSERT(vmx->vmcs_gpa); in l1_guest_code()
66 if (vmx->eptp_gpa) in l1_guest_code()
88 struct kvm_vm *vm; in test_vmx_dirty_log() local
94 /* Create VM */ in test_vmx_dirty_log()
95 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); in test_vmx_dirty_log()
96 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); in test_vmx_dirty_log()
100 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in test_vmx_dirty_log()
107 * Add an identity map for GVA range [0xc0000000, 0xc0002000). This in test_vmx_dirty_log()
110 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES); in test_vmx_dirty_log()
[all …]
/linux-6.14.4/tools/testing/selftests/kvm/lib/x86/
vmx.c
1 // SPDX-License-Identifier: GPL-2.0-only
6 #include <asm/msr-index.h>
64 * vm - The VM to allocate guest-virtual addresses in.
67 * p_vmx_gva - The guest virtual address for the struct vmx_pages.
73 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) in vcpu_alloc_vmx() argument
75 vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); in vcpu_alloc_vmx()
76 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); in vcpu_alloc_vmx()
79 vmx->vmxon = (void *)vm_vaddr_alloc_page(vm); in vcpu_alloc_vmx()
80 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
81 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
[all …]
memstress.c
1 // SPDX-License-Identifier: GPL-2.0
3 * x86-specific extensions to memstress.c.
38 GUEST_ASSERT(vmx->vmcs_gpa); in memstress_l1_guest_code()
43 rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1]; in memstress_l1_guest_code()
55 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G in memstress_nested_pages()
56 * pages and 4-level paging, plus a few pages per-vCPU for data in memstress_nested_pages()
62 void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) in memstress_setup_ept() argument
66 prepare_eptp(vmx, vm, 0); in memstress_setup_ept()
69 * Identity map the first 4G and the test region with 1G pages so that in memstress_setup_ept()
73 nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL); in memstress_setup_ept()
[all …]
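
The "513 page tables" comment in the excerpt follows from simple arithmetic: with 1 GiB pages under 4-level paging, one PDPT (512 entries) maps 512 GiB, so 256 TiB needs 512 PDPTs, plus the single top-level table whose entries point at them. A short check of that count (standard x86-64 4-level layout assumed):

#include <stdio.h>

int main(void)
{
    /* With 4-level paging and 1 GiB pages, the lowest table actually used is
     * the PDPT: each of its 512 entries maps 1 GiB, so one PDPT covers 512 GiB. */
    unsigned long long gib = 1ULL << 30;
    unsigned long long per_pdpt = 512 * gib;              /* 512 GiB */
    unsigned long long target  = 256ULL << 40;            /* 256 TiB */
    unsigned long long pdpts   = target / per_pdpt;       /* 512 */
    unsigned long long total   = pdpts + 1;               /* + one top-level table */

    printf("PDPTs: %llu, total tables: %llu\n", pdpts, total);  /* 512, 513 */
    return 0;
}
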
/linux-6.14.4/include/uapi/drm/
panthor_drm.h
1 /* SPDX-License-Identifier: MIT */
19 * - Structures must be aligned on 64-bit/8-byte. If the object is not
21 * - Fields must be explicitly aligned to their natural type alignment with
23 * - All padding fields will be checked by the driver to make sure they are
25 * - Flags can be added, but not removed/replaced.
26 * - New fields can be added to the main structures (the structures
31 * - New fields can be added to indirect objects (objects pointed by the
35 * - If the kernel driver is too old to know some fields, those will be
37 * - If userspace is too old to know some fields, those will be zeroed
39 * - Each new flag/field addition must come with a driver version update so
[all …]
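
The uAPI rules listed above (64-bit structure alignment, naturally aligned fields, explicit padding that the driver checks is zero) can be illustrated with a made-up ioctl argument struct; example_uapi_args below is not a real panthor_drm.h structure.

#include <stdint.h>
#include <stdio.h>

/* Illustrative ioctl argument struct following the listed rules:
 * 64-bit field first, explicit padding, total size a multiple of 8. */
struct example_uapi_args {
    uint64_t va;        /* naturally aligned 64-bit field */
    uint32_t flags;
    uint32_t pad;       /* explicit padding; the driver checks such fields are zero */
};

_Static_assert(sizeof(struct example_uapi_args) % 8 == 0,
               "uAPI struct must be padded to a 64-bit multiple");

int main(void)
{
    struct example_uapi_args args = { .va = 0x1000, .flags = 1, .pad = 0 };

    printf("sizeof = %zu\n", sizeof(args));   /* 16 */
    return 0;
}
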
/linux-6.14.4/arch/arm/mm/
mmu.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
35 #include <asm/mach/map.h>
46 * zero-initialized data and COW.
52 * The pmd table for the upper-most set of pages.
143 int i, selected = -1; in early_cachepolicy()
154 if (selected == -1) in early_cachepolicy()
240 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
467 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
478 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
[all …]
/linux-6.14.4/drivers/gpu/drm/imagination/
pvr_vm.c
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
29 * This is the "top level" datatype in the VM code. It's exposed in the public
34 * struct pvr_vm_context - Context type used to represent a single VM.
49 /** @lock: Global lock on this VM. */
62 * @dummy_gem: GEM object to enable VM reservation. All private BOs
77 kref_get(&vm_ctx->ref_count); in pvr_vm_context_get()
83 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
84 * page table structure behind a VM context.
85 * @vm_ctx: Target VM context.
89 return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx); in pvr_vm_get_page_table_root_addr()
[all …]
/linux-6.14.4/include/linux/irqchip/
arm-gic-v4.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
14 * ITSList mechanism to perform inter-ITS synchronization.
30 * and map/unmap when using the ITSList mechanism.
33 * vmapp_lock -> vpe_lock ->vmovp_lock.
43 /* per-vPE VLPI tracking */
77 * vPE and vLPI operations using vpe->col_idx.
86 /* Unique (system-wide) VPE identifier */
98 * @vm: Pointer to the GICv4 notion of a VM
105 struct its_vm *vm; member
127 struct its_vlpi_map *map; member
[all …]
/linux-6.14.4/drivers/gpu/drm/i915/selftests/
i915_vma.c
43 if (vma->vm != ctx->vm) { in assert_vma()
44 pr_err("VMA created with wrong VM\n"); in assert_vma()
48 if (vma->size != obj->base.size) { in assert_vma()
50 vma->size, obj->base.size); in assert_vma()
54 if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) { in assert_vma()
56 vma->gtt_view.type); in assert_vma()
65 struct i915_address_space *vm, in checked_vma_instance() argument
71 vma = i915_vma_instance(obj, vm, view); in checked_vma_instance()
76 if (vma->vm != vm) { in checked_vma_instance()
77 pr_err("VMA's vm [%p] does not match request [%p]\n", in checked_vma_instance()
[all …]
/linux-6.14.4/drivers/gpu/drm/i915/gem/selftests/
i915_gem_context.c
2 * SPDX-License-Identifier: MIT
42 int err = -ENODEV; in live_nop_switch()
52 if (!DRIVER_CAPS(i915)->has_logical_contexts) in live_nop_switch()
61 err = -ENOMEM; in live_nop_switch()
88 i915_request_await_dma_fence(this, &rq->fence); in live_nop_switch()
96 intel_gt_set_wedged(engine->gt); in live_nop_switch()
98 err = -EIO; in live_nop_switch()
106 nctx, engine->name, ktime_to_ns(times[1] - times[0])); in live_nop_switch()
108 err = igt_live_test_begin(&t, i915, __func__, engine->name); in live_nop_switch()
127 i915_request_await_dma_fence(this, &rq->fence); in live_nop_switch()
[all …]
/linux-6.14.4/drivers/gpu/drm/lima/
lima_gem.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /* Copyright 2017-2019 Qiang Yu <[email protected]> */
5 #include <linux/iosys-map.h>
9 #include <linux/dma-mapping.h>
21 int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) in lima_heap_alloc() argument
24 struct address_space *mapping = bo->base.base.filp->f_mapping; in lima_heap_alloc()
25 struct device *dev = bo->base.base.dev->dev; in lima_heap_alloc()
26 size_t old_size = bo->heap_size; in lima_heap_alloc()
27 size_t new_size = bo->heap_size ? bo->heap_size * 2 : in lima_heap_alloc()
32 if (bo->heap_size >= bo->base.base.size) in lima_heap_alloc()
[all …]
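
lima_heap_alloc() above grows the heap by doubling (heap_size * 2, starting from an initial chunk) and stops once the backing object's full size is reached. Below is a standalone sketch of that growth policy; the 64 KiB initial chunk and 1 MiB object size are illustrative, not taken from the driver.

#include <stdio.h>

/* Doubling growth policy: start at an initial chunk, double on each miss,
 * never exceed the object's full size. */
static unsigned long next_heap_size(unsigned long cur, unsigned long max,
                                    unsigned long initial)
{
    unsigned long next = cur ? cur * 2 : initial;

    return next > max ? max : next;
}

int main(void)
{
    unsigned long max = 1UL << 20;   /* 1 MiB backing object */
    unsigned long sz = 0;

    while (sz < max) {
        sz = next_heap_size(sz, max, 64 * 1024);
        printf("grow heap to %lu KiB\n", sz >> 10);   /* 64, 128, 256, 512, 1024 */
    }
    return 0;
}
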
/linux-6.14.4/tools/testing/selftests/kvm/s390/
ucontrol_test.c
1 // SPDX-License-Identifier: GPL-2.0-only
43 TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0); in require_ucontrol_admin()
122 * create VM with single vcpu, map kvm_run and SIE control block for easy access
131 self->kvm_fd = open_kvm_dev_path_or_exit(); in FIXTURE_SETUP()
132 self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL); in FIXTURE_SETUP()
133 ASSERT_GE(self->vm_fd, 0); in FIXTURE_SETUP()
135 kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL, in FIXTURE_SETUP()
137 TH_LOG("create VM 0x%llx", info.cpuid); in FIXTURE_SETUP()
139 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0); in FIXTURE_SETUP()
140 ASSERT_GE(self->vcpu_fd, 0); in FIXTURE_SETUP()
[all …]
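
The fixture above opens /dev/kvm, creates a VM with KVM_CREATE_VM and a vCPU with KVM_CREATE_VCPU. Below is a minimal architecture-neutral version of that setup; it passes machine type 0 instead of the s390-only KVM_VM_S390_UCONTROL and skips the kvm_run/SIE mapping the test goes on to do, so it is a sketch of the generic ioctl sequence rather than the selftest itself.

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    /* Open the KVM device and sanity-check the API version. */
    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (kvm < 0)
        return 1;

    if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
        return 1;

    /* Create a VM (machine type 0 = default) and a single vCPU with id 0. */
    int vm = ioctl(kvm, KVM_CREATE_VM, 0);
    if (vm < 0)
        return 1;

    int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
    if (vcpu < 0)
        return 1;

    printf("vm fd %d, vcpu fd %d\n", vm, vcpu);
    close(vcpu);
    close(vm);
    close(kvm);
    return 0;
}
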
/linux-6.14.4/sound/pci/ctxfi/
ctvmem.c
1 // SPDX-License-Identifier: GPL-2.0-only
26 * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc) in get_vm_block() argument
36 if (size > vm->size) { in get_vm_block()
37 dev_err(atc->card->dev, in get_vm_block()
42 mutex_lock(&vm->lock); in get_vm_block()
43 list_for_each(pos, &vm->unused) { in get_vm_block()
45 if (entry->size >= size) in get_vm_block()
48 if (pos == &vm->unused) in get_vm_block()
51 if (entry->size == size) { in get_vm_block()
[all …]
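
get_vm_block() above does a first-fit walk over the vm->unused list, taking the first entry whose size covers the request; the entry->size == size test hints at separate handling for exact and oversized matches, but that part is truncated. Below is a toy first-fit search over a static array, standing in for the kernel list machinery (splitting of oversized blocks is omitted).

#include <stdio.h>

struct block { unsigned int addr, size, used; };

static struct block blocks[] = {
    { 0x0000, 0x100, 1 },
    { 0x0100, 0x080, 0 },
    { 0x0180, 0x400, 0 },
};

/* First fit: return the first unused block large enough for @size, or NULL. */
static struct block *get_block(unsigned int size)
{
    for (unsigned int i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
        if (!blocks[i].used && blocks[i].size >= size) {
            blocks[i].used = 1;
            return &blocks[i];
        }
    }
    return NULL;   /* nothing large enough */
}

int main(void)
{
    struct block *b = get_block(0x200);

    if (b)
        printf("allocated block at 0x%x (size 0x%x)\n", b->addr, b->size);
    return 0;
}
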
