/linux-6.14.4/tools/testing/selftests/kvm/arm64/ |
D | vcpu_width_config.c | 7 * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs 8 * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be 44 * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init0, 71 * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be 72 * configured, and two mixed-width vCPUs cannot be configured. 73 * For each of those three cases, configure vCPUs in two different orders. 74 * One order is running KVM_CREATE_VCPU for 2 vCPUs, and then running 93 /* Test with 64bit vCPUs */ in main() 96 "Configuring 64bit EL1 vCPUs failed unexpectedly"); in main() 99 "Configuring 64bit EL1 vCPUs failed unexpectedly"); in main() [all …]
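
What the excerpt describes boils down to issuing KVM_ARM_VCPU_INIT twice, on two vCPUs of the same VM, with different target features, and expecting the second call to fail when the widths are mixed. A minimal sketch of that flow, assuming the selftest harness headers; the helper name and the barebones VM setup are illustrative, not the file's exact code:

#include "kvm_util.h"

/* Returns 0 if the second init succeeds, or -errno if KVM rejects it,
 * which is the expected outcome for mixed-width @init0/@init1. */
static int init_two_vcpus(struct kvm_vcpu_init *init0,
                          struct kvm_vcpu_init *init1)
{
        struct kvm_vcpu *vcpu0, *vcpu1;
        struct kvm_vm *vm;
        int ret;

        vm = vm_create_barebones();
        vcpu0 = __vm_vcpu_add(vm, 0);
        vcpu1 = __vm_vcpu_add(vm, 1);

        vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
        ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);

        kvm_vm_free(vm);
        return ret;
}

A 32bit vCPU is requested by setting KVM_ARM_VCPU_EL1_32BIT in features[0] of one init and not the other; with identical features the second init is expected to succeed.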
|
D | vgic_init.c | 75 struct kvm_vcpu *vcpus[]) in vm_gic_create_with_vcpus() argument 80 v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); in vm_gic_create_with_vcpus() 137 * DIST/REDIST (or DIST/CPUIF for GICv2). Assumption is 4 vcpus are going to be 336 struct kvm_vcpu *vcpus[NR_VCPUS]; in test_vgic_then_vcpus() local 340 v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus); in test_vgic_then_vcpus() 344 /* Add the rest of the VCPUs */ in test_vgic_then_vcpus() 346 vcpus[i] = vm_vcpu_add(v.vm, i, guest_code); in test_vgic_then_vcpus() 348 ret = run_vcpu(vcpus[3]); in test_vgic_then_vcpus() 354 /* All the VCPUs are created before the VGIC KVM device gets initialized */ 357 struct kvm_vcpu *vcpus[NR_VCPUS]; in test_vcpus_then_vgic() local [all …]
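
The two orderings the test exercises are visible in the excerpt: either the vGIC device is created after a single vCPU and the rest are added later, or all vCPUs exist before the device is created. A sketch of the second ordering using the generic KVM device API (vCPU count, NULL guest code and the lack of error-path handling are illustrative simplifications):

#include <unistd.h>
#include "kvm_util.h"

#define NR_VCPUS 4

static void create_vcpus_then_vgic(void)
{
        struct kvm_vcpu *vcpus[NR_VCPUS];
        struct kvm_vm *vm;
        int gic_fd;

        /* All vCPUs exist before the vgic-v3 device is created.
         * NULL guest code: the vCPUs are never run in this sketch. */
        vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus);

        gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
        TEST_ASSERT(gic_fd >= 0, "Failed to create the vgic-v3 device");

        /* Tell KVM the topology is final and initialise the distributor. */
        kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                            KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);

        close(gic_fd);
        kvm_vm_free(vm);
}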
|
D | vgic_lpi_stress.c | 29 static struct kvm_vcpu **vcpus; variable 73 /* Round-robin the LPIs to all of the vCPUs in the VM */ in guest_setup_its_mappings() 312 pthread_create(&vcpu_threads[i], NULL, vcpu_worker_thread, vcpus[i]); in run_test() 337 vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu)); in setup_vm() 338 TEST_ASSERT(vcpus, "Failed to allocate vCPU array"); in setup_vm() 340 vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus); in setup_vm() 344 vcpu_init_descriptor_tables(vcpus[i]); in setup_vm() 360 free(vcpus); in destroy_vm() 366 pr_info(" -v:\tnumber of vCPUs (default: %u)\n", test_data.nr_cpus); in pr_usage()
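
The driver loop behind lines 312 and 337 is the usual one-thread-per-vCPU pattern. A condensed, hedged version with the thread body reduced to a single vcpu_run():

#include <pthread.h>
#include <stdlib.h>
#include "kvm_util.h"

static void *vcpu_worker_thread(void *arg)
{
        struct kvm_vcpu *vcpu = arg;

        vcpu_run(vcpu);              /* run the guest until it exits */
        return NULL;
}

static void run_all_vcpus(struct kvm_vcpu **vcpus, uint32_t nr_cpus)
{
        pthread_t *threads = calloc(nr_cpus, sizeof(*threads));
        uint32_t i;

        TEST_ASSERT(threads, "Failed to allocate the thread array");

        for (i = 0; i < nr_cpus; i++)
                pthread_create(&threads[i], NULL, vcpu_worker_thread, vcpus[i]);
        for (i = 0; i < nr_cpus; i++)
                pthread_join(threads[i], NULL);

        free(threads);
}

As an aside, the malloc() at line 337 sizes the vcpus array with sizeof(struct kvm_vcpu) even though it stores pointers; that only over-allocates, but sizeof(*vcpus), like the calloc() above, gives the exact size.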
|
D | arch_timer.c | 167 /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ in test_init_timer_irq() 168 vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, in test_init_timer_irq() 170 vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, in test_init_timer_irq() 187 vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); in test_vm_create() 204 vcpu_init_descriptor_tables(vcpus[i]); in test_vm_create()
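
Lines 168-170 query the physical- and virtual-timer PPIs through the per-vCPU device-attribute interface; since the numbers are the same on every vCPU, only vCPU 0 is asked. A small sketch of that query (the helper name is illustrative, the attribute constants are from the arm64 uapi):

#include "kvm_util.h"

static void get_timer_irqs(struct kvm_vcpu *vcpu, int *ptimer_irq, int *vtimer_irq)
{
        vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
                             KVM_ARM_VCPU_TIMER_IRQ_PTIMER, ptimer_irq);
        vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
                             KVM_ARM_VCPU_TIMER_IRQ_VTIMER, vtimer_irq);
}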
|
/linux-6.14.4/tools/testing/selftests/kvm/x86/ |
D | set_boot_cpu_id.c | 90 struct kvm_vcpu *vcpus[]) in create_vm() argument 102 vcpus[i] = vm_vcpu_add(vm, i, i == bsp_vcpu_id ? guest_bsp_vcpu : in create_vm() 109 struct kvm_vcpu *vcpus[2]; in run_vm_bsp() local 112 vm = create_vm(ARRAY_SIZE(vcpus), bsp_vcpu_id, vcpus); in run_vm_bsp() 114 run_vcpu(vcpus[0]); in run_vm_bsp() 115 run_vcpu(vcpus[1]); in run_vm_bsp() 122 struct kvm_vcpu *vcpus[2]; in check_set_bsp_busy() local 125 vm = create_vm(ARRAY_SIZE(vcpus), 0, vcpus); in check_set_bsp_busy() 127 test_set_bsp_busy(vcpus[1], "after adding vcpu"); in check_set_bsp_busy() 129 run_vcpu(vcpus[0]); in check_set_bsp_busy() [all …]
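
The detail create_vm() relies on is that KVM_SET_BOOT_CPU_ID has to be issued before any vCPU is created; once a vCPU exists the ioctl is rejected, which is what the check_set_bsp_busy() path probes. A sketch of that ordering (NULL guest code keeps it self-contained; the helper name is illustrative):

#include "kvm_util.h"

static struct kvm_vm *create_vm_with_bsp(uint32_t nr_vcpus, uint32_t bsp_id,
                                         struct kvm_vcpu *vcpus[])
{
        struct kvm_vm *vm = vm_create(nr_vcpus);
        uint32_t i;

        /* Must happen before any vCPU is created. */
        vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_id);

        for (i = 0; i < nr_vcpus; i++)
                vcpus[i] = vm_vcpu_add(vm, i, NULL);

        return vm;
}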
|
D | recalc_apic_map_test.c | 37 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; in main() local 47 * Create the max number of vCPUs supported by selftests so that KVM in main() 51 vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus); in main() 54 * Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc in main() 55 * due to vCPUs having aliased xAPIC IDs (truncated to 8 bits). in main() 58 vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC); in main() 60 TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0); in main() 62 vcpuN = vcpus[KVM_MAX_VCPUS - 1]; in main()
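
LAPIC_X2APIC in the excerpt appears to be the test's shorthand for an IA32_APIC_BASE value with both the global-enable and x2APIC-enable bits set. A sketch of the loop at line 58, with the bits spelled out locally instead of relying on the test's macro:

#include "kvm_util.h"
#include "processor.h"

/* IA32_APIC_BASE: bit 11 = xAPIC global enable, bit 10 = x2APIC enable. */
#define APIC_BASE_X2APIC        ((1ULL << 11) | (1ULL << 10))

static void enable_x2apic_on_all(struct kvm_vcpu **vcpus, uint32_t nr_vcpus)
{
        uint32_t i;

        for (i = 0; i < nr_vcpus; i++)
                vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, APIC_BASE_X2APIC);
}

Once every vCPU is in x2APIC mode, none of the APIC IDs alias when truncated to 8 bits, so KVM actually performs the optimized map recalculation the test races against.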
|
D | dirty_log_page_splitting_test.c | 22 #define VCPUS 2 macro 59 for (i = 0; i < VCPUS; i++) { in run_vcpu_iteration() 103 vm = memstress_create_vm(mode, VCPUS, guest_percpu_mem_size, in run_test() 106 guest_num_pages = (VCPUS * guest_percpu_mem_size) >> vm->page_shift; in run_test() 124 for (i = 0; i < VCPUS; i++) in run_test() 127 memstress_start_vcpu_threads(VCPUS, vcpu_worker); in run_test() 156 /* Run vCPUs again to fault pages back in. */ in run_test() 161 * Tell the vCPU threads to quit. No need to manually check that vCPUs in run_test() 166 memstress_join_vcpu_threads(VCPUS); in run_test() 176 * exist in the data slot, and the vCPUs should have dirtied all pages in run_test() [all …]
|
D | xapic_state_test.c | 123 * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of in test_icr() 124 * vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff. in test_icr() 174 struct kvm_vcpu *vcpus[NR_VCPUS]; in test_apic_id() local 179 vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus); in test_apic_id() 183 apic_base = vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE); in test_apic_id() 190 __test_apic_id(vcpus[i], apic_base); in test_apic_id() 191 __test_apic_id(vcpus[i], apic_base | X2APIC_ENABLE); in test_apic_id() 192 __test_apic_id(vcpus[i], apic_base); in test_apic_id()
|
/linux-6.14.4/tools/testing/selftests/kvm/ |
D | kvm_page_table_test.c | 56 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; member 107 * Before dirty logging, vCPUs concurrently access the first in guest_code() 162 * After dirty logging is stopped, vCPUs concurrently read in guest_code() 256 guest_code, test_args.vcpus); in pre_init_before_test() 311 pr_info("Number of testing vCPUs: %d\n", nr_vcpus); in pre_init_before_test() 319 int vcpus; in vcpus_complete_new_stage() local 321 /* Wake up all the vcpus to run new test stage */ in vcpus_complete_new_stage() 322 for (vcpus = 0; vcpus < nr_vcpus; vcpus++) { in vcpus_complete_new_stage() 326 pr_debug("All vcpus have been notified to continue\n"); in vcpus_complete_new_stage() 328 /* Wait for all the vcpus to complete new test stage */ in vcpus_complete_new_stage() [all …]
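
vcpus_complete_new_stage() at line 319 is a two-semaphore barrier: the main thread posts once per vCPU to release them into the new stage, then waits for each vCPU to report completion. A hedged reconstruction (the semaphore names follow the excerpted comments and may differ from the file):

#include <semaphore.h>

/* Both semaphores are initialised with sem_init() before the vCPU
 * threads start; each vCPU waits on the first and posts the second. */
static sem_t test_stage_updated;
static sem_t test_stage_completed;

static void vcpus_complete_new_stage(int nr_vcpus)
{
        int i;

        /* Wake up all the vcpus to run the new test stage. */
        for (i = 0; i < nr_vcpus; i++)
                sem_post(&test_stage_updated);

        /* Wait for all the vcpus to complete the new test stage. */
        for (i = 0; i < nr_vcpus; i++)
                sem_wait(&test_stage_completed);
}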
|
D | mmu_stress_test.c | 41 * has occurred, otherwise vCPUs may complete their writes and advance in guest_code() 149 * validating *all* of guest memory sync for this stage, as vCPUs will in vcpu_worker() 205 static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, in spawn_workers() argument 224 info[i].vcpu = vcpus[i]; in spawn_workers() 236 pr_info("Waiting for vCPUs to finish %s...\n", name); in rendezvous_with_vcpus() 242 pr_info("\r%d vCPUs haven't rendezvoused...", in rendezvous_with_vcpus() 249 /* Release the vCPUs after getting the time of the previous action. */ in rendezvous_with_vcpus() 250 pr_info("\rAll vCPUs finished %s, releasing...\n", name); in rendezvous_with_vcpus() 286 struct kvm_vcpu **vcpus; in main() local 309 nr_vcpus = atoi_positive("Number of vCPUs", optarg); in main() [all …]
|
D | kvm_binary_stats_test.c | 179 * The second parameter #vcpu sets the number of VCPUs being created. 188 struct kvm_vcpu **vcpus; in main() local 193 /* Get the number of VMs and VCPUs that would be created for testing. */ in main() 212 /* Create VMs and VCPUs */ in main() 216 vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu); in main() 217 TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers"); in main() 229 vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j); in main() 248 vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]); in main() 250 stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j])); in main() 269 free(vcpus); in main()
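
Each vCPU's statistics arrive through a dedicated file descriptor (line 248), whose contents begin with the fixed kvm_stats_header from the uapi. A minimal sketch of opening one vCPU's stats fd and sanity-checking the header with a plain read(), without the test's full descriptor walk:

#include <unistd.h>
#include <linux/kvm.h>
#include "kvm_util.h"

static void check_vcpu_stats_header(struct kvm_vcpu *vcpu)
{
        struct kvm_stats_header header;
        int stats_fd = vcpu_get_stats_fd(vcpu);
        ssize_t ret;

        ret = read(stats_fd, &header, sizeof(header));
        TEST_ASSERT(ret == sizeof(header), "Failed to read the stats header");
        TEST_ASSERT(header.num_desc, "vCPU stats should contain descriptors");

        close(stats_fd);
}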
|
D | access_tracking_perf_test.c | 15 * vCPUs that each touch every page in disjoint regions of memory. Performance 16 * is measured in the time it takes all vCPUs to finish touching their 65 /* Whether to overlap the regions of memory vCPUs access. */ 75 /* The number of vCPUs to create in the VM. */ 138 /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */ in mark_vcpu_memory_idle() 268 /* Kick off the vCPUs by incrementing iteration. */ in run_iteration() 273 /* Wait for all vCPUs to finish the iteration. */ in run_iteration() 293 * Even though this parallelizes the work across vCPUs, this is still a in mark_memory_idle() 334 printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n", in help() 342 printf(" -v: specify the number of vCPUs to run.\n"); in help() [all …]
|
D | arch_timer.c | 15 * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer 17 * even more, an option to migrate the vCPUs across pCPUs (-m), at a 40 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; variable 51 struct kvm_vcpu *vcpu = vcpus[vcpu_idx]; in test_vcpu_run() 185 pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", in test_print_help() 191 …pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u… in test_print_help() 206 test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); in parse_args() 208 pr_info("Max allowed vCPUs: %u\n", in parse_args()
|
D | memslot_modification_stress_test.c | 101 pr_info("Finished creating vCPUs\n"); in run_test() 105 pr_info("Started all vCPUs\n"); in run_test() 119 " [-b memory] [-v vcpus] [-o] [-i iterations]\n", name); in help() 127 printf(" -v: specify the number of vCPUs to run.\n"); in help() 161 nr_vcpus = atoi_positive("Number of vCPUs", optarg); in main() 163 "Invalid number of vcpus, must be between 1 and %d", in main()
|
D | dirty_log_perf_test.c | 113 * (iteration is negative) so that vCPUs are accessing memory in vcpu_worker() 190 /* Allow the vCPUs to populate memory */ in run_test() 214 * Incrementing the iteration number will start the vCPUs in run_test() 253 * Run vCPUs while dirty logging is being disabled to stress disabling in run_test() 268 * Tell the vCPU threads to quit. No need to manually check that vCPUs in run_test() 296 "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]" in help() 311 printf(" -n: Run the vCPUs in nested mode (L2)\n"); in help() 312 printf(" -e: Run vCPUs while dirty logging is being disabled. This\n" in help() 318 printf(" -v: specify the number of vCPUs to run.\n"); in help() 401 nr_vcpus = atoi_positive("Number of vCPUs", optarg); in main() [all …]
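
Enabling and disabling dirty logging in this test is a memslot-flag toggle plus a bitmap fetch. A minimal single-slot sketch using the selftest wrappers (the real test batches this across slots and iterations; the helper name is illustrative):

#include "kvm_util.h"

static void collect_dirty_log(struct kvm_vm *vm, int slot, unsigned long *bitmap)
{
        /* Start logging writes to the slot. */
        vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);

        /* ... vCPU threads write guest memory here ... */

        /* Fetch the dirty bitmap accumulated so far. */
        kvm_vm_get_dirty_log(vm, slot, bitmap);

        /* Turning logging back off while vCPUs keep running is what -e stresses. */
        vm_mem_region_set_flags(vm, slot, 0);
}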
|
D | steal_time.c | 342 struct kvm_vcpu *vcpus[NR_VCPUS]; in main() local 363 vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus); in main() 369 TEST_REQUIRE(is_steal_time_supported(vcpus[0])); in main() 374 steal_time_init(vcpus[i], i); in main() 376 vcpu_args_set(vcpus[i], 1, i); in main() 379 run_vcpu(vcpus[i]); in main() 382 run_vcpu(vcpus[i]); in main() 390 /* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */ in main() 403 run_vcpu(vcpus[i]); in main()
|
D | demand_paging_test.c | 209 pr_info("Finished creating vCPUs and starting uffd threads\n"); in run_test() 213 pr_info("Started all vCPUs\n"); in run_test() 247 " [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name); in help() 263 printf(" -v: specify the number of vCPUs to run.\n"); in help() 310 nr_vcpus = atoi_positive("Number of vCPUs", optarg); in main() 312 "Invalid number of vcpus, must be between 1 and %d", max_vcpus); in main()
|
/linux-6.14.4/tools/testing/selftests/kvm/include/ |
D | memstress.h | 41 /* Run vCPUs in L2 instead of L1, if the architecture supports it. */ 45 /* True if all vCPUs are pinned to pCPUs */ 50 /* Test is done, stop running vCPUs. */ 67 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *)); 68 void memstress_join_vcpu_threads(int vcpus); 72 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
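
Putting the header's pieces together, a typical caller creates the VM, starts one worker thread per vCPU, and joins them when the test is done. A hedged sketch; the worker body and memory size are illustrative, and the memstress_create_vm() parameter order follows the call visible in dirty_log_page_splitting_test.c above:

#include "kvm_util.h"
#include "memstress.h"

static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
        /* One trip into the guest; real workers loop until the test's stop flag. */
        vcpu_run(vcpu_args->vcpu);
}

static void run_memstress(int nr_vcpus, uint64_t percpu_bytes)
{
        struct kvm_vm *vm;

        vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, percpu_bytes,
                                 1 /* slots */, VM_MEM_SRC_ANONYMOUS,
                                 false /* partition accesses */);

        memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
        memstress_join_vcpu_threads(nr_vcpus);

        memstress_destroy_vm(vm);
}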
|
/linux-6.14.4/tools/testing/selftests/kvm/lib/ |
D | memstress.c | 41 static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; variable 89 struct kvm_vcpu *vcpus[], in memstress_setup_vcpus() argument 100 vcpu_args->vcpu = vcpus[i]; in memstress_setup_vcpus() 116 vcpu_args_set(vcpus[i], 1, i); in memstress_setup_vcpus() 138 /* By default vCPUs will write to memory. */ in memstress_create_vm() 172 memstress_guest_code, vcpus); in memstress_create_vm() 181 * When running vCPUs in L2, restrict the test region to 48 bits to in memstress_create_vm() 220 memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes, in memstress_create_vm() 224 pr_info("Configuring vCPUs to run in L2 (nested).\n"); in memstress_create_vm() 225 memstress_setup_nested(vm, nr_vcpus, vcpus); in memstress_create_vm() [all …]
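
The per-vCPU wiring memstress_setup_vcpus() does for the guest side is visible at line 116: each vCPU receives its own index as the single guest argument. Reduced to that core (a sketch, not the full function, which also carves up the memory range per vCPU):

#include "kvm_util.h"

static void setup_vcpu_indices(struct kvm_vcpu **vcpus, int nr_vcpus)
{
        int i;

        for (i = 0; i < nr_vcpus; i++)
                vcpu_args_set(vcpus[i], 1, i);        /* guest sees its vcpu_idx */
}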
|
/linux-6.14.4/tools/testing/selftests/kvm/lib/arm64/ |
D | vgic.c | 23 * nr_vcpus - Number of vCPUs supported by this VM 31 * vCPUs for the VM, it must be called after all the vCPUs have been created. 40 TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty"); in vgic_v3_setup() 44 * function after all the vCPUs are added. in vgic_v3_setup() 46 list_for_each(iter, &vm->vcpus) in vgic_v3_setup() 49 "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)", in vgic_v3_setup()
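
The assertion at lines 46-49 walks the VM's vCPU list to confirm the caller-supplied count matches what was actually created. The counting idiom, extracted as a sketch:

#include <linux/list.h>
#include "kvm_util.h"

static unsigned int count_created_vcpus(struct kvm_vm *vm)
{
        struct list_head *iter;
        unsigned int nr = 0;

        list_for_each(iter, &vm->vcpus)
                nr++;

        return nr;
}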
|
/linux-6.14.4/Documentation/virt/kvm/ |
D | vcpu-requests.rst | 36 /* Make request @req of all VCPUs of the VM with struct kvm @kvm. */ 42 and kvm_make_all_cpus_request() has the kicking of all VCPUs built 56 2) Waking a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest 66 VCPUs have a mode state, ``vcpu->mode``, that is used to track whether the 69 ensure VCPU requests are seen by VCPUs (see "Ensuring Requests Are Seen"), 119 This request informs all VCPUs that the VM is dead and unusable, e.g. due to 153 from VCPUs running in guest mode. That is, sleeping VCPUs do not need 154 to be awakened for these requests. Sleeping VCPUs will handle the 161 proceeding. This flag only applies to VCPUs that would receive IPIs. 188 When making requests to VCPUs, we want to avoid the receiving VCPU [all …]
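
In the terms this document uses, the requesting side records a request bit and kicks the target, and the vCPU consumes the bit from its run loop before re-entering the guest. A purely illustrative kernel-side fragment: KVM_REQ_EXAMPLE and handle_example() are placeholders, while kvm_make_request(), kvm_check_request() and kvm_vcpu_kick() are the helpers the document discusses.

#include <linux/kvm_host.h>

/* Requesting side: record the request, then force the vCPU out of guest
 * mode so it is noticed (kvm_make_all_cpus_request() does this for every
 * vCPU of the VM). */
static void poke_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
        kvm_vcpu_kick(vcpu);
}

/* vCPU side: consumed in the run loop before re-entering the guest. */
static void service_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
                handle_example(vcpu);
}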
|
/linux-6.14.4/tools/testing/selftests/kvm/lib/x86/ |
D | memstress.c | 80 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) in memstress_setup_nested() argument 97 /* Share the same EPT table across all vCPUs. */ in memstress_setup_nested() 107 vcpu_regs_get(vcpus[vcpu_id], ®s); in memstress_setup_nested() 109 vcpu_regs_set(vcpus[vcpu_id], ®s); in memstress_setup_nested() 110 vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id); in memstress_setup_nested()
|
/linux-6.14.4/tools/testing/selftests/kvm/riscv/ |
D | arch_timer.c | 85 vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); in test_vm_create() 86 __TEST_REQUIRE(__vcpu_has_isa_ext(vcpus[0], KVM_RISCV_ISA_EXT_SSTC), in test_vm_create() 93 vcpu_init_vector_tables(vcpus[i]); in test_vm_create() 96 timer_freq = vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency)); in test_vm_create()
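
Line 96 reads the Sstc timer frequency through the one-reg interface, and only vCPU 0 is queried. As a one-line helper, assuming the RISC-V selftest headers shown in the excerpt:

#include "kvm_util.h"
#include "processor.h"

static uint64_t get_guest_timer_freq(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));
}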
|
/linux-6.14.4/Documentation/virt/kvm/x86/ |
D | errata.rst | 57 vCPU has legacy xAPIC enabled, e.g. to bring up hotplugged vCPUs via INIT-SIPI 58 on VMs with > 255 vCPUs. A side effect of the quirk is that, if multiple vCPUs 61 not enabled, KVM follows x86 architecture when processing interrupts (all vCPUs
|
/linux-6.14.4/Documentation/virt/kvm/devices/ |
D | vcpu.rst | 40 all vcpus, while as an SPI it must be a separate number per vcpu. 129 for one VCPU will be used by all the other VCPUs. It isn't possible to set a PMU 155 -EBUSY One or more VCPUs has already run 169 Setting the same PPI for different timers will prevent the VCPUs from running. 170 Setting the interrupt number on a VCPU configures all VCPUs created at that 172 configured values on other VCPUs. Userspace should configure the interrupt 173 numbers on at least one VCPU after creating all VCPUs and before running any 174 VCPUs.
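
Outside the selftest harness, the attributes described here are set with KVM_SET_DEVICE_ATTR on the vCPU file descriptor, with addr pointing at the value. A hedged sketch of configuring the virtual-timer PPI on one vCPU; fd handling and the chosen IRQ number are illustrative:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vtimer_ppi(int vcpu_fd, int irq)
{
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_TIMER_CTRL,
                .attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
                .addr  = (__u64)(unsigned long)&irq,
        };

        /* Per the text above, do this on at least one vCPU after all
         * vCPUs have been created and before any of them runs. */
        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}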
|