1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vpmu_counter_access - Test vPMU event counter access
4  *
5  * Copyright (c) 2023 Google LLC.
6  *
7  * This test checks if the guest can see the same number of the PMU event
8  * counters (PMCR_EL0.N) that userspace sets, if the guest can access
9  * those counters, and if the guest is prevented from accessing any
10  * other counters.
 * It also checks if the userspace accesses to the PMU registers honor the
12  * PMCR.N value that's set for the guest.
13  * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
14  */
15 #include <kvm_util.h>
16 #include <processor.h>
17 #include <test_util.h>
18 #include <vgic.h>
19 #include <perf/arm_pmuv3.h>
20 #include <linux/bitfield.h>
21 
22 /* The max number of the PMU event counters (excluding the cycle counter) */
23 #define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
24 
25 /* The cycle counter bit position that's common among the PMU registers */
26 #define ARMV8_PMU_CYCLE_IDX		31
27 
/* Per-test VM context: the VM, its single vCPU, and the vGIC-v3 device fd. */
struct vpmu_vm {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int gic_fd;
};

/* Global test VM; torn down and recreated for each sub-test. */
static struct vpmu_vm vpmu_vm;

/* A SET/CLR pair of one of the {PMCNTEN,PMINTEN,PMOVS} bitmap registers. */
struct pmreg_sets {
	uint64_t set_reg_id;
	uint64_t clr_reg_id;
};

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
42 
/* Extract the event counter count (PMCR_EL0.N field) from a PMCR value. */
static uint64_t get_pmcr_n(uint64_t pmcr)
{
	uint64_t n = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);

	return n;
}
47 
/* Replace the PMCR_EL0.N field of *@pmcr with @pmcr_n, in place. */
static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
{
	__u64 val = *pmcr;

	u64p_replace_bits(&val, pmcr_n, ARMV8_PMU_PMCR_N);
	*pmcr = val;
}
52 
/*
 * Build the counter bitmap for {PMCNTEN,PMINTEN,PMOVS}-style registers:
 * the cycle counter bit plus one bit per implemented event counter (@n).
 */
static uint64_t get_counters_mask(uint64_t n)
{
	return BIT(ARMV8_PMU_CYCLE_IDX) | (n ? GENMASK(n - 1, 0) : 0);
}
61 
/* Read PMEVCNTR<n>_EL0 indirectly, through PMXEVCNTR_EL0 */
static inline unsigned long read_sel_evcntr(int sel)
{
	/* Select counter @sel via PMSELR_EL0; isb() orders it before the read */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevcntr_el0);
}
69 
/* Write PMEVCNTR<n>_EL0 indirectly, through PMXEVCNTR_EL0 */
static inline void write_sel_evcntr(int sel, unsigned long val)
{
	/* Select counter @sel via PMSELR_EL0; isb() orders it before the write */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevcntr_el0);
	isb();
}
78 
/* Read PMEVTYPER<n>_EL0 indirectly, through PMXEVTYPER_EL0 */
static inline unsigned long read_sel_evtyper(int sel)
{
	/* Select counter @sel via PMSELR_EL0; isb() orders it before the read */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevtyper_el0);
}
86 
/* Write PMEVTYPER<n>_EL0 indirectly, through PMXEVTYPER_EL0 */
static inline void write_sel_evtyper(int sel, unsigned long val)
{
	/* Select counter @sel via PMSELR_EL0; isb() orders it before the write */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevtyper_el0);
	isb();
}
95 
static void pmu_disable_reset(void)
{
	uint64_t pmcr = read_sysreg(pmcr_el0);

	/*
	 * Clear PMCR_EL0.E to disable all counters, and set PMCR_EL0.P to
	 * reset the event counters to zero. (NOTE: PMCR.P resets only the
	 * event counters, not the cycle counter.)
	 */
	pmcr &= ~ARMV8_PMU_PMCR_E;
	write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
	isb();
}
105 
#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
/* Read PMEVCNTR<n>_EL0 directly; PMEVN_SWITCH dispatches on the runtime @n */
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}
113 
#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
/* Write PMEVCNTR<n>_EL0 directly; PMEVN_SWITCH dispatches on the runtime @n */
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
	isb();
}
121 
#define READ_PMEVTYPERN(n) \
	return read_sysreg(pmevtyper##n##_el0)
/* Read PMEVTYPER<n>_EL0 directly; PMEVN_SWITCH dispatches on the runtime @n */
static unsigned long read_pmevtypern(int n)
{
	PMEVN_SWITCH(n, READ_PMEVTYPERN);
	return 0;
}
129 
#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
/* Write PMEVTYPER<n>_EL0 directly; PMEVN_SWITCH dispatches on the runtime @n */
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
	isb();
}
137 
138 /*
139  * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
140  * accessors that test cases will use. Each of the accessors will
141  * either directly reads/writes PMEV{CNTR,TYPER}<n>_EL0
142  * (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through
143  * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
144  *
145  * This is used to test that combinations of those accessors provide
146  * the consistent behavior.
147  */
struct pmc_accessor {
	/* A function to be used to read PMEVCNTR<n>_EL0 */
	unsigned long	(*read_cntr)(int idx);
	/* A function to be used to write PMEVCNTR<n>_EL0 */
	void		(*write_cntr)(int idx, unsigned long val);
	/* A function to be used to read PMEVTYPER<n>_EL0 */
	unsigned long	(*read_typer)(int idx);
	/* A function to be used to write PMEVTYPER<n>_EL0 */
	void		(*write_typer)(int idx, unsigned long val);
};
158 
/* All four combinations of direct and PMSELR-indirect accessor functions. */
struct pmc_accessor pmc_accessors[] = {
	/* test with all direct accesses */
	{ read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
	/* test with all indirect accesses */
	{ read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
	/* read with direct accesses, and write with indirect accesses */
	{ read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
	/* read with indirect accesses, and write with direct accesses */
	{ read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};
169 
/*
 * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
 * assuming that the pointer is one of the entries in pmc_accessors[].
 * Note: the argument is parenthesized so the pointer subtraction binds
 * correctly even for a non-trivial argument expression.
 */
#define PMC_ACC_TO_IDX(acc)	((acc) - &pmc_accessors[0])
175 
/*
 * Assert that the @mask bits of @regname are all set (when @set_expected
 * is true) or all clear (when false).
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * (safe in unbraced if/else); arguments are parenthesized against
 * operator-precedence surprises.
 */
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)			 \
do {										 \
	uint64_t _tval = read_sysreg(regname);					 \
										 \
	if (set_expected)							 \
		__GUEST_ASSERT((_tval & (mask)),				 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
				_tval, (mask), (set_expected));			 \
	else									 \
		__GUEST_ASSERT(!(_tval & (mask)),				 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
				_tval, (mask), (set_expected));			 \
} while (0)
189 
/*
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 * The SET and CLR views of each register must agree, since they expose
 * the same underlying state.
 */
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
{
	GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
}
203 
204 /*
205  * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
206  * to the specified counter (@pmc_idx) can be read/written as expected.
207  * When @set_op is true, it tries to set the bit for the counter in
208  * those registers by writing the SET registers (the bit won't be set
209  * if the counter is not implemented though).
210  * Otherwise, it tries to clear the bits in the registers by writing
211  * the CLR registers.
212  * Then, it checks if the values indicated in the registers are as expected.
213  */
test_bitmap_pmu_regs(int pmc_idx,bool set_op)214 static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
215 {
216 	uint64_t pmcr_n, test_bit = BIT(pmc_idx);
217 	bool set_expected = false;
218 
219 	if (set_op) {
220 		write_sysreg(test_bit, pmcntenset_el0);
221 		write_sysreg(test_bit, pmintenset_el1);
222 		write_sysreg(test_bit, pmovsset_el0);
223 
224 		/* The bit will be set only if the counter is implemented */
225 		pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
226 		set_expected = (pmc_idx < pmcr_n) ? true : false;
227 	} else {
228 		write_sysreg(test_bit, pmcntenclr_el0);
229 		write_sysreg(test_bit, pmintenclr_el1);
230 		write_sysreg(test_bit, pmovsclr_el0);
231 	}
232 	check_bitmap_pmu_regs(test_bit, set_expected);
233 }
234 
/*
 * Tests for reading/writing registers for the (implemented) event counter
 * specified by @pmc_idx, using the accessor combination @acc from
 * pmc_accessors[]. Runs in the guest; failures are reported via
 * __GUEST_ASSERT.
 */
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
	uint64_t write_data, read_data;

	/* Disable all PMCs and reset all PMCs to zero. */
	pmu_disable_reset();

	/*
	 * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
	 */

	/* Make sure that the bit in those registers are set to 0 */
	test_bitmap_pmu_regs(pmc_idx, false);
	/* Test if setting the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, true);
	/* Test if clearing the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, false);

	/*
	 * Tests for reading/writing the event type register.
	 */

	/*
	 * Set the event type register to an arbitrary value just for testing
	 * of reading/writing the register.
	 * Arm ARM says that for the event from 0x0000 to 0x003F,
	 * the value indicated in the PMEVTYPER<n>_EL0.evtCount field is
	 * the value written to the field even when the specified event
	 * is not supported.
	 */
	write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
	acc->write_typer(pmc_idx, write_data);
	read_data = acc->read_typer(pmc_idx);
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);

	/*
	 * Tests for reading/writing the event count register.
	 */

	read_data = acc->read_cntr(pmc_idx);

	/* The count value must be 0, as it is disabled and reset */
	__GUEST_ASSERT(read_data == 0,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data);

	/* Write an arbitrary per-counter value and read it back */
	write_data = read_data + pmc_idx + 0x12345;
	acc->write_cntr(pmc_idx, write_data);
	read_data = acc->read_cntr(pmc_idx);
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}
294 
/* Sentinel: no exception is currently expected */
#define INVALID_EC	(-1ul)
/* EC the guest expects the next trap to have; INVALID_EC when none */
uint64_t expected_ec = INVALID_EC;

static void guest_sync_handler(struct ex_regs *regs)
{
	uint64_t esr, ec;

	esr = read_sysreg(esr_el1);
	ec = ESR_ELx_EC(esr);

	/* The trap's EC must match what TEST_EXCEPTION() armed */
	__GUEST_ASSERT(expected_ec == ec,
			"PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
			regs->pc, esr, ec, expected_ec);

	/* skip the trapping instruction */
	regs->pc += 4;

	/* Use INVALID_EC to indicate an exception occurred */
	expected_ec = INVALID_EC;
}
315 
/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC and skip the instruction that
 * trapped. If expected_ec is unchanged afterwards, no exception was
 * taken and the assertion fails.
 */
#define TEST_EXCEPTION(ec, ops)				\
({							\
	GUEST_ASSERT(ec != INVALID_EC);			\
	WRITE_ONCE(expected_ec, ec);			\
	dsb(ish);					\
	ops;						\
	GUEST_ASSERT(expected_ec == INVALID_EC);	\
})
330 
331 /*
332  * Tests for reading/writing registers for the unimplemented event counter
333  * specified by @pmc_idx (>= PMCR_EL0.N).
334  */
test_access_invalid_pmc_regs(struct pmc_accessor * acc,int pmc_idx)335 static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
336 {
337 	/*
338 	 * Reading/writing the event count/type registers should cause
339 	 * an UNDEFINED exception.
340 	 */
341 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_cntr(pmc_idx));
342 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
343 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_typer(pmc_idx));
344 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
345 	/*
346 	 * The bit corresponding to the (unimplemented) counter in
347 	 * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
348 	 */
349 	test_bitmap_pmu_regs(pmc_idx, 1);
350 	test_bitmap_pmu_regs(pmc_idx, 0);
351 }
352 
/*
 * The guest is configured with PMUv3 with @expected_pmcr_n number of
 * event counters.
 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
 * if reading/writing PMU registers for implemented or unimplemented
 * counters works as expected.
 */
static void guest_code(uint64_t expected_pmcr_n)
{
	uint64_t pmcr, pmcr_n, unimp_mask;
	int i, pmc;

	/* Sanity-check the host-supplied value before using it */
	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);

	pmcr = read_sysreg(pmcr_el0);
	pmcr_n = get_pmcr_n(pmcr);

	/* Make sure that PMCR_EL0.N indicates the value userspace set */
	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
			expected_pmcr_n, pmcr_n);

	/*
	 * Make sure that (RAZ) bits corresponding to unimplemented event
	 * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
	 * to zero.
	 * (NOTE: bits for implemented event counters are reset to UNKNOWN)
	 */
	unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
	check_bitmap_pmu_regs(unimp_mask, false);

	/*
	 * Tests for reading/writing PMU registers for implemented counters.
	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
	 */
	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
		for (pmc = 0; pmc < pmcr_n; pmc++)
			test_access_pmc_regs(&pmc_accessors[i], pmc);
	}

	/*
	 * Tests for reading/writing PMU registers for unimplemented counters.
	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
	 */
	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
		for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
			test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
	}

	GUEST_DONE();
}
406 
/*
 * Create a VM that has one vCPU with PMUv3 configured, storing the result
 * in the global vpmu_vm. Skips the test if vgic-v3 cannot be created.
 */
static void create_vpmu_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	uint8_t pmuver, ec;
	/* IRQ 23 is in the PPI range (16-31), as the PMU interrupt must be */
	uint64_t dfr0, irq = 23;
	struct kvm_device_attr irq_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr = (uint64_t)&irq,
	};
	struct kvm_device_attr init_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
	};

	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
	memset(&vpmu_vm, 0, sizeof(vpmu_vm));

	vpmu_vm.vm = vm_create(1);
	vm_init_descriptor_tables(vpmu_vm.vm);
	/* Route every sync-exception class to guest_sync_handler */
	for (ec = 0; ec < ESR_ELx_EC_MAX + 1; ec++) {
		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
					guest_sync_handler);
	}

	/* Create vCPU with PMUv3 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64);
	__TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
		       "Failed to create vgic-v3, skipping");

	/* Make sure that PMUv3 support is indicated in the ID register */
	dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

	/* Initialize vPMU: set the overflow IRQ, then finalize the device */
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}
453 
/* Tear down the global vpmu_vm: release the vGIC fd, then free the VM. */
static void destroy_vpmu_vm(void)
{
	close(vpmu_vm.gic_fd);
	kvm_vm_free(vpmu_vm.vm);
}
459 
/*
 * Pass @pmcr_n as the guest_code() argument and run @vcpu to completion,
 * forwarding any guest assertion failure to the host test framework.
 */
static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
{
	struct ucall uc;
	uint64_t cmd;

	vcpu_args_set(vcpu, 1, pmcr_n);
	vcpu_run(vcpu);

	cmd = get_ucall(vcpu, &uc);
	if (cmd == UCALL_ABORT)
		REPORT_GUEST_ASSERT(uc);
	else if (cmd != UCALL_DONE)
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
477 
/*
 * Create the vpmu_vm and attempt to set PMCR_EL0.N to @pmcr_n from
 * userspace. When @expect_fail is true, the write is expected to be
 * ignored (PMCR unchanged); otherwise PMCR.N must read back as @pmcr_n.
 * The VM is left alive for the caller; the caller destroys it.
 */
static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
	struct kvm_vcpu *vcpu;
	uint64_t pmcr, pmcr_orig;

	create_vpmu_vm(guest_code);
	vcpu = vpmu_vm.vcpu;

	pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
	pmcr = pmcr_orig;

	/*
	 * Setting a larger value of PMCR.N should not modify the field, and
	 * return a success.
	 */
	set_pmcr_n(&pmcr, pmcr_n);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
	pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));

	if (expect_fail)
		TEST_ASSERT(pmcr_orig == pmcr,
			    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
			    pmcr, pmcr_n);
	else
		TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
			    "Failed to update PMCR.N to %lu (received: %lu)",
			    pmcr_n, get_pmcr_n(pmcr));
}
506 
/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test. The guest is run twice — once after creation and once
 * after a vCPU reset — to verify that PMCR_EL0.N survives re-initialization.
 */
static void run_access_test(uint64_t pmcr_n)
{
	uint64_t sp;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_init init;

	pr_debug("Test with pmcr_n %lu\n", pmcr_n);

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	/* Save the initial sp to restore them later to run the guest again */
	sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1));

	run_vcpu(vcpu, pmcr_n);

	/*
	 * Reset and re-initialize the vCPU, and run the guest code again to
	 * check if PMCR_EL0.N is preserved.
	 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	aarch64_vcpu_setup(vcpu, &init);
	vcpu_init_descriptor_tables(vcpu);
	/* Restore the stack pointer and entry point clobbered by the reset */
	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	run_vcpu(vcpu, pmcr_n);

	destroy_vpmu_vm();
}
542 
/* The {SET,CLR} register pairs whose userspace accesses are validated. */
static struct pmreg_sets validity_check_reg_sets[] = {
	PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
	PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
	PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
};
548 
/*
 * Create a VM with PMCR_EL0.N set to @pmcr_n, and check if KVM handles
 * the userspace accesses of the PMU register sets in
 * @validity_check_reg_sets[] correctly: bits for unimplemented counters
 * must never be readable as set, and force-setting them must be discarded.
 */
static void run_pmregs_validity_test(uint64_t pmcr_n)
{
	int i;
	struct kvm_vcpu *vcpu;
	uint64_t set_reg_id, clr_reg_id, reg_val;
	uint64_t valid_counters_mask, max_counters_mask;

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	valid_counters_mask = get_counters_mask(pmcr_n);
	max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);

	for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
		set_reg_id = validity_check_reg_sets[i].set_reg_id;
		clr_reg_id = validity_check_reg_sets[i].clr_reg_id;

		/*
		 * Test if the 'set' and 'clr' variants of the registers
		 * are initialized based on the number of valid counters.
		 */
		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

		/*
		 * Using the 'set' variant, force-set the register to the
		 * max number of possible counters and test if KVM discards
		 * the bits for unimplemented counters as it should.
		 */
		vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
	}

	destroy_vpmu_vm();
}
604 
/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

	/* expect_fail=true: the PMCR.N write must be rejected/ignored */
	test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
	destroy_vpmu_vm();
}
617 
618 /*
619  * Return the default number of implemented PMU event counters excluding
620  * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
621  */
get_pmcr_n_limit(void)622 static uint64_t get_pmcr_n_limit(void)
623 {
624 	uint64_t pmcr;
625 
626 	create_vpmu_vm(guest_code);
627 	pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
628 	destroy_vpmu_vm();
629 	return get_pmcr_n(pmcr);
630 }
631 
main(void)632 int main(void)
633 {
634 	uint64_t i, pmcr_n;
635 
636 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
637 
638 	pmcr_n = get_pmcr_n_limit();
639 	for (i = 0; i <= pmcr_n; i++) {
640 		run_access_test(i);
641 		run_pmregs_validity_test(i);
642 	}
643 
644 	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
645 		run_error_test(i);
646 
647 	return 0;
648 }
649