// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <[email protected]>
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include "event-parse.h"
#include "trace-seq.h"

#define __weak __attribute__((weak))

#ifdef HAVE_UDIS86

#include <udis86.h>

static ud_t ud;

static void init_disassembler(void)
{
	ud_init(&ud);
	ud_set_syntax(&ud, UD_SYN_ATT);
}

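/*
 * Pick the CPU operating mode for udis86 from the guest state bits:
 * real mode and vm86 decode as 16-bit, long mode (CS.L) as 64-bit,
 * and protected mode with CS.D set as 32-bit.
 */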
static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
			       int cr0_pe, int eflags_vm,
			       int cs_d, int cs_l)
{
	int mode;

	if (!cr0_pe)
		mode = 16;
	else if (eflags_vm)
		mode = 16;
	else if (cs_l)
		mode = 64;
	else if (cs_d)
		mode = 32;
	else
		mode = 16;

	ud_set_pc(&ud, rip);
	ud_set_mode(&ud, mode);
	ud_set_input_buffer(&ud, insn, len);
	ud_disassemble(&ud);
	return ud_insn_asm(&ud);
}

#else

static void init_disassembler(void)
{
}

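/* Fallback when udis86 is not available: dump the instruction bytes in hex. */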
static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
			       int cr0_pe, int eflags_vm,
			       int cs_d, int cs_l)
{
	static char out[15*3+1];
	int i;

	for (i = 0; i < len; ++i)
		sprintf(out + i * 3, "%02x ", insn[i]);
	out[len*3-1] = '\0';
	return out;
}

#endif


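/*
 * Hardware exit reason tables.  Each _ER(name, value) entry below is
 * expanded through the _ER() macro into a { "name", value } pair in
 * the vmx_exit_reasons[] and svm_exit_reasons[] string tables.
 */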
#define VMX_EXIT_REASONS			\
	_ER(EXCEPTION_NMI,	 0)		\
	_ER(EXTERNAL_INTERRUPT,	 1)		\
	_ER(TRIPLE_FAULT,	 2)		\
	_ER(PENDING_INTERRUPT,	 7)		\
	_ER(NMI_WINDOW,		 8)		\
	_ER(TASK_SWITCH,	 9)		\
	_ER(CPUID,		 10)		\
	_ER(HLT,		 12)		\
	_ER(INVD,		 13)		\
	_ER(INVLPG,		 14)		\
	_ER(RDPMC,		 15)		\
	_ER(RDTSC,		 16)		\
	_ER(VMCALL,		 18)		\
	_ER(VMCLEAR,		 19)		\
	_ER(VMLAUNCH,		 20)		\
	_ER(VMPTRLD,		 21)		\
	_ER(VMPTRST,		 22)		\
	_ER(VMREAD,		 23)		\
	_ER(VMRESUME,		 24)		\
	_ER(VMWRITE,		 25)		\
	_ER(VMOFF,		 26)		\
	_ER(VMON,		 27)		\
	_ER(CR_ACCESS,		 28)		\
	_ER(DR_ACCESS,		 29)		\
	_ER(IO_INSTRUCTION,	 30)		\
	_ER(MSR_READ,		 31)		\
	_ER(MSR_WRITE,		 32)		\
	_ER(MWAIT_INSTRUCTION,	 36)		\
	_ER(MONITOR_INSTRUCTION, 39)		\
	_ER(PAUSE_INSTRUCTION,	 40)		\
	_ER(MCE_DURING_VMENTRY,	 41)		\
	_ER(TPR_BELOW_THRESHOLD, 43)		\
	_ER(APIC_ACCESS,	 44)		\
	_ER(EOI_INDUCED,	 45)		\
	_ER(EPT_VIOLATION,	 48)		\
	_ER(EPT_MISCONFIG,	 49)		\
	_ER(INVEPT,		 50)		\
	_ER(PREEMPTION_TIMER,	 52)		\
	_ER(WBINVD,		 54)		\
	_ER(XSETBV,		 55)		\
	_ER(APIC_WRITE,		 56)		\
	_ER(INVPCID,		 58)		\
	_ER(PML_FULL,		 62)		\
	_ER(XSAVES,		 63)		\
	_ER(XRSTORS,		 64)

#define SVM_EXIT_REASONS \
	_ER(EXIT_READ_CR0,	0x000)		\
	_ER(EXIT_READ_CR3,	0x003)		\
	_ER(EXIT_READ_CR4,	0x004)		\
	_ER(EXIT_READ_CR8,	0x008)		\
	_ER(EXIT_WRITE_CR0,	0x010)		\
	_ER(EXIT_WRITE_CR3,	0x013)		\
	_ER(EXIT_WRITE_CR4,	0x014)		\
	_ER(EXIT_WRITE_CR8,	0x018)		\
	_ER(EXIT_READ_DR0,	0x020)		\
	_ER(EXIT_READ_DR1,	0x021)		\
	_ER(EXIT_READ_DR2,	0x022)		\
	_ER(EXIT_READ_DR3,	0x023)		\
	_ER(EXIT_READ_DR4,	0x024)		\
	_ER(EXIT_READ_DR5,	0x025)		\
	_ER(EXIT_READ_DR6,	0x026)		\
	_ER(EXIT_READ_DR7,	0x027)		\
	_ER(EXIT_WRITE_DR0,	0x030)		\
	_ER(EXIT_WRITE_DR1,	0x031)		\
	_ER(EXIT_WRITE_DR2,	0x032)		\
	_ER(EXIT_WRITE_DR3,	0x033)		\
	_ER(EXIT_WRITE_DR4,	0x034)		\
	_ER(EXIT_WRITE_DR5,	0x035)		\
	_ER(EXIT_WRITE_DR6,	0x036)		\
	_ER(EXIT_WRITE_DR7,	0x037)		\
	_ER(EXIT_EXCP_DE,	0x040)		\
	_ER(EXIT_EXCP_DB,	0x041)		\
	_ER(EXIT_EXCP_BP,	0x043)		\
	_ER(EXIT_EXCP_OF,	0x044)		\
	_ER(EXIT_EXCP_BR,	0x045)		\
	_ER(EXIT_EXCP_UD,	0x046)		\
	_ER(EXIT_EXCP_NM,	0x047)		\
	_ER(EXIT_EXCP_DF,	0x048)		\
	_ER(EXIT_EXCP_TS,	0x04a)		\
	_ER(EXIT_EXCP_NP,	0x04b)		\
	_ER(EXIT_EXCP_SS,	0x04c)		\
	_ER(EXIT_EXCP_GP,	0x04d)		\
	_ER(EXIT_EXCP_PF,	0x04e)		\
	_ER(EXIT_EXCP_MF,	0x050)		\
	_ER(EXIT_EXCP_AC,	0x051)		\
	_ER(EXIT_EXCP_MC,	0x052)		\
	_ER(EXIT_EXCP_XF,	0x053)		\
	_ER(EXIT_INTR,		0x060)		\
	_ER(EXIT_NMI,		0x061)		\
	_ER(EXIT_SMI,		0x062)		\
	_ER(EXIT_INIT,		0x063)		\
	_ER(EXIT_VINTR,		0x064)		\
	_ER(EXIT_CR0_SEL_WRITE,	0x065)		\
	_ER(EXIT_IDTR_READ,	0x066)		\
	_ER(EXIT_GDTR_READ,	0x067)		\
	_ER(EXIT_LDTR_READ,	0x068)		\
	_ER(EXIT_TR_READ,	0x069)		\
	_ER(EXIT_IDTR_WRITE,	0x06a)		\
	_ER(EXIT_GDTR_WRITE,	0x06b)		\
	_ER(EXIT_LDTR_WRITE,	0x06c)		\
	_ER(EXIT_TR_WRITE,	0x06d)		\
	_ER(EXIT_RDTSC,		0x06e)		\
	_ER(EXIT_RDPMC,		0x06f)		\
	_ER(EXIT_PUSHF,		0x070)		\
	_ER(EXIT_POPF,		0x071)		\
	_ER(EXIT_CPUID,		0x072)		\
	_ER(EXIT_RSM,		0x073)		\
	_ER(EXIT_IRET,		0x074)		\
	_ER(EXIT_SWINT,		0x075)		\
	_ER(EXIT_INVD,		0x076)		\
	_ER(EXIT_PAUSE,		0x077)		\
	_ER(EXIT_HLT,		0x078)		\
	_ER(EXIT_INVLPG,	0x079)		\
	_ER(EXIT_INVLPGA,	0x07a)		\
	_ER(EXIT_IOIO,		0x07b)		\
	_ER(EXIT_MSR,		0x07c)		\
	_ER(EXIT_TASK_SWITCH,	0x07d)		\
	_ER(EXIT_FERR_FREEZE,	0x07e)		\
	_ER(EXIT_SHUTDOWN,	0x07f)		\
	_ER(EXIT_VMRUN,		0x080)		\
	_ER(EXIT_VMMCALL,	0x081)		\
	_ER(EXIT_VMLOAD,	0x082)		\
	_ER(EXIT_VMSAVE,	0x083)		\
	_ER(EXIT_STGI,		0x084)		\
	_ER(EXIT_CLGI,		0x085)		\
	_ER(EXIT_SKINIT,	0x086)		\
	_ER(EXIT_RDTSCP,	0x087)		\
	_ER(EXIT_ICEBP,		0x088)		\
	_ER(EXIT_WBINVD,	0x089)		\
	_ER(EXIT_MONITOR,	0x08a)		\
	_ER(EXIT_MWAIT,		0x08b)		\
	_ER(EXIT_MWAIT_COND,	0x08c)		\
	_ER(EXIT_XSETBV,	0x08d)		\
	_ER(EXIT_NPF,		0x400)		\
	_ER(EXIT_AVIC_INCOMPLETE_IPI,		0x401)	\
	_ER(EXIT_AVIC_UNACCELERATED_ACCESS,	0x402)	\
	_ER(EXIT_ERR,		-1)

#define _ER(reason, val)	{ #reason, val },
struct str_values {
	const char	*str;
	int		val;
};

static struct str_values vmx_exit_reasons[] = {
	VMX_EXIT_REASONS
	{ NULL, -1}
};

static struct str_values svm_exit_reasons[] = {
	SVM_EXIT_REASONS
	{ NULL, -1}
};

static struct isa_exit_reasons {
	unsigned isa;
	struct str_values *strings;
} isa_exit_reasons[] = {
	{ .isa = 1, .strings = vmx_exit_reasons },
	{ .isa = 2, .strings = svm_exit_reasons },
	{ }
};

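/*
 * Map an exit reason value to its name for the given ISA (1 = VMX,
 * 2 = SVM).  Returns "UNKNOWN-ISA" for an unknown ISA and NULL when
 * the value has no entry in the table.
 */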
static const char *find_exit_reason(unsigned isa, int val)
{
	struct str_values *strings = NULL;
	int i;

	for (i = 0; isa_exit_reasons[i].strings; ++i)
		if (isa_exit_reasons[i].isa == isa) {
			strings = isa_exit_reasons[i].strings;
			break;
		}
	if (!strings)
		return "UNKNOWN-ISA";
	for (i = 0; strings[i].str; i++)
		if (strings[i].val == val)
			break;

	return strings[i].str;
}

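/*
 * Print the named exit reason field of a record.  If the event has no
 * "isa" field, assume VMX (isa 1).
 */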
static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
			     struct tep_event *event, const char *field)
{
	unsigned long long isa;
	unsigned long long val;
	const char *reason;

	if (tep_get_field_val(s, event, field, record, &val, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "isa", record, &isa, 0) < 0)
		isa = 1;

	reason = find_exit_reason(isa, val);
	if (reason)
		trace_seq_printf(s, "reason %s", reason);
	else
		trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
	return 0;
}

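/*
 * Weak hooks that the host application may override to resolve a guest
 * RIP to a function name (and release that name when done).  The
 * defaults do nothing.
 */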
__weak const char *tep_plugin_kvm_get_func(struct tep_event *event,
					   struct tep_record *record,
					   unsigned long long *val)
{
	return NULL;
}

__weak void tep_plugin_kvm_put_func(const char *func)
{
}


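/* Append " <func>[+0xoff]" for a guest RIP, if a resolver is available. */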
static void add_rip_function(struct trace_seq *s, struct tep_record *record,
			     struct tep_event *event, unsigned long long rip)
{
	unsigned long long ip = rip;
	const char *func;

	func = tep_plugin_kvm_get_func(event, record, &ip);
	if (func) {
		trace_seq_printf(s, " %s", func);
		/* The application may update ip to the start of the function */
		if (ip != rip)
			trace_seq_printf(s, "+0x%0llx", rip - ip);
		tep_plugin_kvm_put_func(func);
	}
}

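/*
 * kvm_exit: print the decoded exit reason, the guest RIP (with an
 * optional resolved function name) and info1/info2 when present.
 */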
static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
			    struct tep_event *event, void *context)
{
	unsigned long long info1 = 0, info2 = 0;
	unsigned long long rip;

	if (print_exit_reason(s, record, event, "exit_reason") < 0)
		return -1;

	if (tep_get_field_val(s, event, "guest_rip", record, &rip, 1) < 0)
		return -1;

	trace_seq_printf(s, " rip 0x%llx", rip);

	add_rip_function(s, record, event, rip);

	if (tep_get_field_val(s, event, "info1", record, &info1, 0) >= 0
	    && tep_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
		trace_seq_printf(s, " info %llx %llx", info1, info2);

	return 0;
}

static int kvm_entry_handler(struct trace_seq *s, struct tep_record *record,
			     struct tep_event *event, void *context)
{
	unsigned long long rip;

	tep_print_num_field(s, " vcpu %u", event, "vcpu_id", record, 1);

	if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
		return -1;

	trace_seq_printf(s, " rip 0x%llx", rip);
	add_rip_function(s, record, event, rip);

	return 0;
}

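/*
 * Bits of the kvm_emulate_insn "flags" field describing the guest CPU
 * state at emulation time; they are passed straight to disassemble().
 */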
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

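/*
 * kvm_emulate_insn: print "csbase:rip: <disassembly>", appending
 * " FAIL" when the emulation failed.
 */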
static int kvm_emulate_insn_handler(struct trace_seq *s,
				    struct tep_record *record,
				    struct tep_event *event, void *context)
{
	unsigned long long rip, csbase, len, flags, failed;
	int llen;
	uint8_t *insn;
	const char *disasm;

	if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "len", record, &len, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "flags", record, &flags, 1) < 0)
		return -1;

	if (tep_get_field_val(s, event, "failed", record, &failed, 1) < 0)
		return -1;

	insn = tep_get_field_raw(s, event, "insn", record, &llen, 1);
	if (!insn)
		return -1;

	disasm = disassemble(insn, len, rip,
			     flags & KVM_EMUL_INSN_F_CR0_PE,
			     flags & KVM_EMUL_INSN_F_EFL_VM,
			     flags & KVM_EMUL_INSN_F_CS_D,
			     flags & KVM_EMUL_INSN_F_CS_L);

	trace_seq_printf(s, "%llx:%llx", csbase, rip);
	add_rip_function(s, record, event, rip);
	trace_seq_printf(s, ": %s%s", disasm, failed ? " FAIL" : "");
	return 0;
}

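/*
 * Shared printer for the nested vmexit events: the decoded exit code
 * plus the raw exit info and interrupt info fields.
 */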
static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
					    struct tep_event *event, void *context)
{
	if (print_exit_reason(s, record, event, "exit_code") < 0)
		return -1;

	tep_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
	tep_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
	tep_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
	tep_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);

	return 0;
}

static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
				     struct tep_event *event, void *context)
{
	unsigned long long rip;

	if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
		return -1;

	trace_seq_printf(s, " rip %llx", rip);
	add_rip_function(s, record, event, rip);

	return kvm_nested_vmexit_inject_handler(s, record, event, context);
}

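/*
 * Local mirror of the kernel's kvm_mmu_page_role bit layout.  It is
 * only decoded when the trace file and the host have the same
 * endianness (see kvm_mmu_print_role() below).
 */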
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_and_not_wp:1;
		unsigned smap_and_not_wp:1;
		unsigned pad_for_nice_hex_output:8;
		unsigned smm:8;
	};
};

static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
			      struct tep_event *event, void *context)
{
	unsigned long long val;
	static const char *access_str[] = {
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"
	};
	union kvm_mmu_page_role role;

	if (tep_get_field_val(s, event, "role", record, &val, 1) < 0)
		return -1;

	role.word = (int)val;

	/*
	 * We can only use the structure if file is of the same
	 * endianness.
	 */
	if (tep_is_file_bigendian(event->tep) ==
	    tep_is_local_bigendian(event->tep)) {

		trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
				 role.level,
				 role.quadrant,
				 role.direct ? " direct" : "",
				 access_str[role.access],
				 role.invalid ? " invalid" : "",
				 role.cr4_pae ? "" : "!",
				 role.nxe ? "" : "!",
				 role.cr0_wp ? "" : "!",
				 role.smep_and_not_wp ? " smep" : "",
				 role.smap_and_not_wp ? " smap" : "",
				 role.smm ? " smm" : "");
	} else
		trace_seq_printf(s, "WORD: %08x", role.word);

	tep_print_num_field(s, " root %u ",  event,
			    "root_count", record, 1);

	if (tep_get_field_val(s, event, "unsync", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "%s%c",  val ? "unsync" : "sync", 0);
	return 0;
}

static int kvm_mmu_get_page_handler(struct trace_seq *s,
				    struct tep_record *record,
				    struct tep_event *event, void *context)
{
	unsigned long long val;

	if (tep_get_field_val(s, event, "created", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "%s ", val ? "new" : "existing");

	if (tep_get_field_val(s, event, "gfn", record, &val, 1) < 0)
		return -1;

	trace_seq_printf(s, "sp gfn %llx ", val);
	return kvm_mmu_print_role(s, record, event, context);
}

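/*
 * Helper registered for the is_writable_pte() call used in kvmmmu
 * print formats: report whether the writable bit is set in a PTE.
 */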
#define PT_WRITABLE_SHIFT 1
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)

static unsigned long long
process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
{
	unsigned long pte = args[0];
	return pte & PT_WRITABLE_MASK;
}

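/*
 * Plugin entry point: register the event handlers and the
 * is_writable_pte() print helper.
 */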
int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
	init_disassembler();

	tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
				   kvm_exit_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_entry",
				   kvm_entry_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
				   kvm_emulate_insn_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
				   kvm_nested_vmexit_handler, NULL);

	tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
				   kvm_nested_vmexit_inject_handler, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
				   kvm_mmu_get_page_handler, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1,
				   "kvmmmu", "kvm_mmu_unsync_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
				   kvm_mmu_print_role, NULL);

	tep_register_event_handler(tep, -1, "kvmmmu",
			"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
			NULL);

	tep_register_print_function(tep,
				    process_is_writable_pte,
				    TEP_FUNC_ARG_INT,
				    "is_writable_pte",
				    TEP_FUNC_ARG_LONG,
				    TEP_FUNC_ARG_VOID);
	return 0;
}

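/* Undo every registration made in TEP_PLUGIN_LOADER(). */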
void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
	tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
				     kvm_exit_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_entry",
				     kvm_entry_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
				     kvm_emulate_insn_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
				     kvm_nested_vmexit_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
				     kvm_nested_vmexit_inject_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
				     kvm_mmu_get_page_handler, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1,
				     "kvmmmu", "kvm_mmu_unsync_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
				     kvm_mmu_print_role, NULL);

	tep_unregister_event_handler(tep, -1, "kvmmmu",
			"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
			NULL);

	tep_unregister_print_function(tep, process_is_writable_pte,
				      "is_writable_pte");
}