1 /* SPDX-License-Identifier: GPL-2.0 */
2 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define _TRACE_KVM_H
4 
5 #include <linux/tracepoint.h>
6 #include <asm/vmx.h>
7 #include <asm/svm.h>
8 #include <asm/clocksource.h>
9 #include <asm/pvclock-abi.h>
10 
11 #undef TRACE_SYSTEM
12 #define TRACE_SYSTEM kvm
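/*
 * Each TRACE_EVENT() below generates a trace_<name>() helper with the
 * TP_PROTO() signature (e.g. trace_kvm_entry(vcpu, force_immediate_exit))
 * plus the ftrace/perf plumbing that exposes the event under events/kvm/
 * in tracefs (usually /sys/kernel/tracing).  The TRACE_HEADER_MULTI_READ
 * guard above lets define_trace.h re-include this file to emit those
 * definitions.
 */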
13 
14 /*
15  * Tracepoint for guest mode entry.
16  */
17 TRACE_EVENT(kvm_entry,
18 	TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
19 	TP_ARGS(vcpu, force_immediate_exit),
20 
21 	TP_STRUCT__entry(
22 		__field(	unsigned int,	vcpu_id		)
23 		__field(	unsigned long,	rip		)
24 		__field(	bool,		immediate_exit	)
25 		__field(	u32,		intr_info	)
26 		__field(	u32,		error_code	)
27 	),
28 
29 	TP_fast_assign(
30 		__entry->vcpu_id        = vcpu->vcpu_id;
31 		__entry->rip		= kvm_rip_read(vcpu);
32 		__entry->immediate_exit	= force_immediate_exit;
33 
34 		kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
35 					     &__entry->error_code);
36 	),
37 
38 	TP_printk("vcpu %u, rip 0x%lx intr_info 0x%08x error_code 0x%08x%s",
39 		  __entry->vcpu_id, __entry->rip,
40 		  __entry->intr_info, __entry->error_code,
41 		  __entry->immediate_exit ? "[immediate exit]" : "")
42 );
43 
44 /*
45  * Tracepoint for hypercall.
46  */
47 TRACE_EVENT(kvm_hypercall,
48 	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
49 		 unsigned long a2, unsigned long a3),
50 	TP_ARGS(nr, a0, a1, a2, a3),
51 
52 	TP_STRUCT__entry(
53 		__field(	unsigned long, 	nr		)
54 		__field(	unsigned long,	a0		)
55 		__field(	unsigned long,	a1		)
56 		__field(	unsigned long,	a2		)
57 		__field(	unsigned long,	a3		)
58 	),
59 
60 	TP_fast_assign(
61 		__entry->nr		= nr;
62 		__entry->a0		= a0;
63 		__entry->a1		= a1;
64 		__entry->a2		= a2;
65 		__entry->a3		= a3;
66 	),
67 
68 	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
69 		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
70 		 __entry->a3)
71 );
72 
73 /*
74  * Tracepoint for Hyper-V hypercall.
75  */
76 TRACE_EVENT(kvm_hv_hypercall,
77 	TP_PROTO(__u16 code, bool fast,  __u16 var_cnt, __u16 rep_cnt,
78 		 __u16 rep_idx, __u64 ingpa, __u64 outgpa),
79 	TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),
80 
81 	TP_STRUCT__entry(
82 		__field(	__u16,		rep_cnt		)
83 		__field(	__u16,		rep_idx		)
84 		__field(	__u64,		ingpa		)
85 		__field(	__u64,		outgpa		)
86 		__field(	__u16, 		code		)
87 		__field(	__u16,		var_cnt		)
88 		__field(	bool,		fast		)
89 	),
90 
91 	TP_fast_assign(
92 		__entry->rep_cnt	= rep_cnt;
93 		__entry->rep_idx	= rep_idx;
94 		__entry->ingpa		= ingpa;
95 		__entry->outgpa		= outgpa;
96 		__entry->code		= code;
97 		__entry->var_cnt	= var_cnt;
98 		__entry->fast		= fast;
99 	),
100 
101 	TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
102 		  __entry->code, __entry->fast ? "fast" : "slow",
103 		  __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
104 		  __entry->ingpa, __entry->outgpa)
105 );
106 
107 TRACE_EVENT(kvm_hv_hypercall_done,
108 	TP_PROTO(u64 result),
109 	TP_ARGS(result),
110 
111 	TP_STRUCT__entry(
112 		__field(__u64, result)
113 	),
114 
115 	TP_fast_assign(
116 		__entry->result	= result;
117 	),
118 
119 	TP_printk("result 0x%llx", __entry->result)
120 );
121 
122 /*
123  * Tracepoint for Xen hypercall.
124  */
125 TRACE_EVENT(kvm_xen_hypercall,
126 	    TP_PROTO(u8 cpl, unsigned long nr,
127 		     unsigned long a0, unsigned long a1, unsigned long a2,
128 		     unsigned long a3, unsigned long a4, unsigned long a5),
129 	    TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5),
130 
131 	TP_STRUCT__entry(
132 		__field(u8, cpl)
133 		__field(unsigned long, nr)
134 		__field(unsigned long, a0)
135 		__field(unsigned long, a1)
136 		__field(unsigned long, a2)
137 		__field(unsigned long, a3)
138 		__field(unsigned long, a4)
139 		__field(unsigned long, a5)
140 	),
141 
142 	TP_fast_assign(
143 		__entry->cpl = cpl;
144 		__entry->nr = nr;
145 		__entry->a0 = a0;
146 		__entry->a1 = a1;
147 		__entry->a2 = a2;
148 		__entry->a3 = a3;
149 		__entry->a4 = a4;
150 		__entry->a5 = a5;
151 	),
152 
153 	TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 0x%lx",
154 		  __entry->cpl, __entry->nr,
155 		  __entry->a0, __entry->a1, __entry->a2,
156 		  __entry->a3, __entry->a4, __entry->a5)
157 );
158 
159 
160 
161 /*
162  * Tracepoint for PIO.
163  */
164 
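/*
 * Direction encoding for the rw argument of trace_kvm_pio(); e.g. an
 * emulated OUT would typically be logged as
 * trace_kvm_pio(KVM_PIO_OUT, port, size, count, data).
 */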
165 #define KVM_PIO_IN   0
166 #define KVM_PIO_OUT  1
167 
168 TRACE_EVENT(kvm_pio,
169 	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
170 		 unsigned int count, const void *data),
171 	TP_ARGS(rw, port, size, count, data),
172 
173 	TP_STRUCT__entry(
174 		__field(	unsigned int, 	rw		)
175 		__field(	unsigned int, 	port		)
176 		__field(	unsigned int, 	size		)
177 		__field(	unsigned int,	count		)
178 		__field(	unsigned int,	val		)
179 	),
180 
181 	TP_fast_assign(
182 		__entry->rw		= rw;
183 		__entry->port		= port;
184 		__entry->size		= size;
185 		__entry->count		= count;
186 		if (size == 1)
187 			__entry->val	= *(unsigned char *)data;
188 		else if (size == 2)
189 			__entry->val	= *(unsigned short *)data;
190 		else
191 			__entry->val	= *(unsigned int *)data;
192 	),
193 
194 	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
195 		  __entry->rw ? "write" : "read",
196 		  __entry->port, __entry->size, __entry->count, __entry->val,
197 		  __entry->count > 1 ? "(...)" : "")
198 );
199 
200 /*
201  * Tracepoint for fast mmio.
202  */
203 TRACE_EVENT(kvm_fast_mmio,
204 	TP_PROTO(u64 gpa),
205 	TP_ARGS(gpa),
206 
207 	TP_STRUCT__entry(
208 		__field(u64,	gpa)
209 	),
210 
211 	TP_fast_assign(
212 		__entry->gpa		= gpa;
213 	),
214 
215 	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
216 );
217 
218 /*
219  * Tracepoint for cpuid.
220  */
221 TRACE_EVENT(kvm_cpuid,
222 	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
223 		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
224 		 bool found, bool used_max_basic),
225 	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
226 
227 	TP_STRUCT__entry(
228 		__field(	unsigned int,	function	)
229 		__field(	unsigned int,	index		)
230 		__field(	unsigned long,	rax		)
231 		__field(	unsigned long,	rbx		)
232 		__field(	unsigned long,	rcx		)
233 		__field(	unsigned long,	rdx		)
234 		__field(	bool,		found		)
235 		__field(	bool,		used_max_basic	)
236 	),
237 
238 	TP_fast_assign(
239 		__entry->function	= function;
240 		__entry->index		= index;
241 		__entry->rax		= rax;
242 		__entry->rbx		= rbx;
243 		__entry->rcx		= rcx;
244 		__entry->rdx		= rdx;
245 		__entry->found		= found;
246 		__entry->used_max_basic	= used_max_basic;
247 	),
248 
249 	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
250 		  __entry->function, __entry->index, __entry->rax,
251 		  __entry->rbx, __entry->rcx, __entry->rdx,
252 		  __entry->found ? "found" : "not found",
253 		  __entry->used_max_basic ? ", used max basic" : "")
254 );
255 
256 #define AREG(x) { APIC_##x, "APIC_" #x }
257 
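/*
 * Value/name pairs for __print_symbolic(); used to pretty-print APIC
 * register offsets in kvm_apic and kvm_avic_unaccelerated_access.
 */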
258 #define kvm_trace_symbol_apic						    \
259 	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
260 	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
261 	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
262 	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
263 	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
264 	AREG(ECTRL)
265 /*
266  * Tracepoint for apic access.
267  */
268 TRACE_EVENT(kvm_apic,
269 	TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
270 	TP_ARGS(rw, reg, val),
271 
272 	TP_STRUCT__entry(
273 		__field(	unsigned int,	rw		)
274 		__field(	unsigned int,	reg		)
275 		__field(	u64,		val		)
276 	),
277 
278 	TP_fast_assign(
279 		__entry->rw		= rw;
280 		__entry->reg		= reg;
281 		__entry->val		= val;
282 	),
283 
284 	TP_printk("apic_%s %s = 0x%llx",
285 		  __entry->rw ? "write" : "read",
286 		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
287 		  __entry->val)
288 );
289 
290 #define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
291 #define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
292 
293 #define KVM_ISA_VMX   1
294 #define KVM_ISA_SVM   2
295 
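/*
 * kvm_print_exit_reason() expands to three printk arguments (the decoded
 * reason, a separator, and any VMX exit-reason flags), so TP_printk()
 * format strings consume it with "%s%s%s"; the isa argument selects the
 * VMX or SVM exit-reason table.
 */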
296 #define kvm_print_exit_reason(exit_reason, isa)				\
297 	(isa == KVM_ISA_VMX) ?						\
298 	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
299 	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
300 	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
301 	(isa == KVM_ISA_VMX) ?						\
302 	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""
303 
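/*
 * Template for exit tracepoints that share one record layout; instantiated
 * below for kvm_exit and kvm_nested_vmexit.
 */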
304 #define TRACE_EVENT_KVM_EXIT(name)					     \
305 TRACE_EVENT(name,							     \
306 	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa),			     \
307 	TP_ARGS(vcpu, isa),						     \
308 									     \
309 	TP_STRUCT__entry(						     \
310 		__field(	unsigned int,	exit_reason	)	     \
311 		__field(	unsigned long,	guest_rip	)	     \
312 		__field(	u32,	        isa             )	     \
313 		__field(	u64,	        info1           )	     \
314 		__field(	u64,	        info2           )	     \
315 		__field(	u32,	        intr_info	)	     \
316 		__field(	u32,	        error_code	)	     \
317 		__field(	unsigned int,	vcpu_id         )	     \
318 		__field(	u64,		requests        )	     \
319 	),								     \
320 									     \
321 	TP_fast_assign(							     \
322 		__entry->guest_rip	= kvm_rip_read(vcpu);		     \
323 		__entry->isa            = isa;				     \
324 		__entry->vcpu_id        = vcpu->vcpu_id;		     \
325 		__entry->requests       = READ_ONCE(vcpu->requests);	     \
326 		kvm_x86_call(get_exit_info)(vcpu,			     \
327 					    &__entry->exit_reason,	     \
328 					    &__entry->info1,		     \
329 					    &__entry->info2,		     \
330 					    &__entry->intr_info,	     \
331 					    &__entry->error_code);	     \
332 	),								     \
333 									     \
334 	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
335 		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x "      \
336 		  "requests 0x%016llx",					     \
337 		  __entry->vcpu_id,					     \
338 		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
339 		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
340 		  __entry->intr_info, __entry->error_code, 		     \
341 		  __entry->requests)					     \
342 )
343 
344 /*
345  * Tracepoint for kvm guest exit:
346  */
347 TRACE_EVENT_KVM_EXIT(kvm_exit);
348 
349 /*
350  * Tracepoint for kvm interrupt injection:
351  */
352 TRACE_EVENT(kvm_inj_virq,
353 	TP_PROTO(unsigned int vector, bool soft, bool reinjected),
354 	TP_ARGS(vector, soft, reinjected),
355 
356 	TP_STRUCT__entry(
357 		__field(	unsigned int,	vector		)
358 		__field(	bool,		soft		)
359 		__field(	bool,		reinjected	)
360 	),
361 
362 	TP_fast_assign(
363 		__entry->vector		= vector;
364 		__entry->soft		= soft;
365 		__entry->reinjected	= reinjected;
366 	),
367 
368 	TP_printk("%s 0x%x%s",
369 		  __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector,
370 		  __entry->reinjected ? " [reinjected]" : "")
371 );
372 
373 #define EXS(x) { x##_VECTOR, "#" #x }
374 
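/*
 * Maps exception vectors to their "#DE"-style mnemonics for
 * __print_symbolic() in kvm_inj_exception.
 */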
375 #define kvm_trace_sym_exc						\
376 	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
377 	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
378 	EXS(MF), EXS(AC), EXS(MC)
379 
380 /*
381  * Tracepoint for kvm exception injection:
382  */
383 TRACE_EVENT(kvm_inj_exception,
384 	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
385 		 bool reinjected),
386 	TP_ARGS(exception, has_error, error_code, reinjected),
387 
388 	TP_STRUCT__entry(
389 		__field(	u8,	exception	)
390 		__field(	u8,	has_error	)
391 		__field(	u32,	error_code	)
392 		__field(	bool,	reinjected	)
393 	),
394 
395 	TP_fast_assign(
396 		__entry->exception	= exception;
397 		__entry->has_error	= has_error;
398 		__entry->error_code	= error_code;
399 		__entry->reinjected	= reinjected;
400 	),
401 
402 	TP_printk("%s%s%s%s%s",
403 		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
404 		  !__entry->has_error ? "" : " (",
405 		  !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }),
406 		  !__entry->has_error ? "" : ")",
407 		  __entry->reinjected ? " [reinjected]" : "")
408 );
409 
410 /*
411  * Tracepoint for page fault.
412  */
413 TRACE_EVENT(kvm_page_fault,
414 	TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
415 	TP_ARGS(vcpu, fault_address, error_code),
416 
417 	TP_STRUCT__entry(
418 		__field(	unsigned int,	vcpu_id		)
419 		__field(	unsigned long,	guest_rip	)
420 		__field(	u64,		fault_address	)
421 		__field(	u64,		error_code	)
422 	),
423 
424 	TP_fast_assign(
425 		__entry->vcpu_id	= vcpu->vcpu_id;
426 		__entry->guest_rip	= kvm_rip_read(vcpu);
427 		__entry->fault_address	= fault_address;
428 		__entry->error_code	= error_code;
429 	),
430 
431 	TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
432 		  __entry->vcpu_id, __entry->guest_rip,
433 		  __entry->fault_address, __entry->error_code)
434 );
435 
436 /*
437  * Tracepoint for guest MSR access.
438  */
439 TRACE_EVENT(kvm_msr,
440 	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
441 	TP_ARGS(write, ecx, data, exception),
442 
443 	TP_STRUCT__entry(
444 		__field(	unsigned,	write		)
445 		__field(	u32,		ecx		)
446 		__field(	u64,		data		)
447 		__field(	u8,		exception	)
448 	),
449 
450 	TP_fast_assign(
451 		__entry->write		= write;
452 		__entry->ecx		= ecx;
453 		__entry->data		= data;
454 		__entry->exception	= exception;
455 	),
456 
457 	TP_printk("msr_%s %x = 0x%llx%s",
458 		  __entry->write ? "write" : "read",
459 		  __entry->ecx, __entry->data,
460 		  __entry->exception ? " (#GP)" : "")
461 );
462 
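/*
 * Convenience wrappers encoding the access direction and whether the access
 * faulted; the _ex variants set the exception flag, which TP_printk()
 * renders as a trailing " (#GP)".
 */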
463 #define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
464 #define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
465 #define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
466 #define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
467 
468 /*
469  * Tracepoint for guest CR access.
470  */
471 TRACE_EVENT(kvm_cr,
472 	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
473 	TP_ARGS(rw, cr, val),
474 
475 	TP_STRUCT__entry(
476 		__field(	unsigned int,	rw		)
477 		__field(	unsigned int,	cr		)
478 		__field(	unsigned long,	val		)
479 	),
480 
481 	TP_fast_assign(
482 		__entry->rw		= rw;
483 		__entry->cr		= cr;
484 		__entry->val		= val;
485 	),
486 
487 	TP_printk("cr_%s %x = 0x%lx",
488 		  __entry->rw ? "write" : "read",
489 		  __entry->cr, __entry->val)
490 );
491 
492 #define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
493 #define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
494 
495 TRACE_EVENT(kvm_pic_set_irq,
496 	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
497 	    TP_ARGS(chip, pin, elcr, imr, coalesced),
498 
499 	TP_STRUCT__entry(
500 		__field(	__u8,		chip		)
501 		__field(	__u8,		pin		)
502 		__field(	__u8,		elcr		)
503 		__field(	__u8,		imr		)
504 		__field(	bool,		coalesced	)
505 	),
506 
507 	TP_fast_assign(
508 		__entry->chip		= chip;
509 		__entry->pin		= pin;
510 		__entry->elcr		= elcr;
511 		__entry->imr		= imr;
512 		__entry->coalesced	= coalesced;
513 	),
514 
515 	TP_printk("chip %u pin %u (%s%s)%s",
516 		  __entry->chip, __entry->pin,
517 		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
518 		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
519 		  __entry->coalesced ? " (coalesced)" : "")
520 );
521 
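/*
 * Destination-shorthand names for ICR bits 19:18, decoded by
 * __print_symbolic() in kvm_apic_ipi below.
 */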
522 #define kvm_apic_dst_shorthand		\
523 	{0x0, "dst"},			\
524 	{0x1, "self"},			\
525 	{0x2, "all"},			\
526 	{0x3, "all-but-self"}
527 
528 TRACE_EVENT(kvm_apic_ipi,
529 	    TP_PROTO(__u32 icr_low, __u32 dest_id),
530 	    TP_ARGS(icr_low, dest_id),
531 
532 	TP_STRUCT__entry(
533 		__field(	__u32,		icr_low		)
534 		__field(	__u32,		dest_id		)
535 	),
536 
537 	TP_fast_assign(
538 		__entry->icr_low	= icr_low;
539 		__entry->dest_id	= dest_id;
540 	),
541 
542 	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
543 		  __entry->dest_id, (u8)__entry->icr_low,
544 		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
545 				   kvm_deliver_mode),
546 		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
547 		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
548 		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
549 		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
550 				   kvm_apic_dst_shorthand))
551 );
552 
553 TRACE_EVENT(kvm_apic_accept_irq,
554 	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
555 	    TP_ARGS(apicid, dm, tm, vec),
556 
557 	TP_STRUCT__entry(
558 		__field(	__u32,		apicid		)
559 		__field(	__u16,		dm		)
560 		__field(	__u16,		tm		)
561 		__field(	__u8,		vec		)
562 	),
563 
564 	TP_fast_assign(
565 		__entry->apicid		= apicid;
566 		__entry->dm		= dm;
567 		__entry->tm		= tm;
568 		__entry->vec		= vec;
569 	),
570 
571 	TP_printk("apicid %x vec %u (%s|%s)",
572 		  __entry->apicid, __entry->vec,
573 		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
574 		  __entry->tm ? "level" : "edge")
575 );
576 
577 TRACE_EVENT(kvm_eoi,
578 	    TP_PROTO(struct kvm_lapic *apic, int vector),
579 	    TP_ARGS(apic, vector),
580 
581 	TP_STRUCT__entry(
582 		__field(	__u32,		apicid		)
583 		__field(	int,		vector		)
584 	),
585 
586 	TP_fast_assign(
587 		__entry->apicid		= apic->vcpu->vcpu_id;
588 		__entry->vector		= vector;
589 	),
590 
591 	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
592 );
593 
594 TRACE_EVENT(kvm_pv_eoi,
595 	    TP_PROTO(struct kvm_lapic *apic, int vector),
596 	    TP_ARGS(apic, vector),
597 
598 	TP_STRUCT__entry(
599 		__field(	__u32,		apicid		)
600 		__field(	int,		vector		)
601 	),
602 
603 	TP_fast_assign(
604 		__entry->apicid		= apic->vcpu->vcpu_id;
605 		__entry->vector		= vector;
606 	),
607 
608 	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
609 );
610 
611 /*
612  * Tracepoint for nested VM-Enter (SVM VMRUN and VMX VM-entry)
613  */
614 TRACE_EVENT(kvm_nested_vmenter,
615 	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
616 		     __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
617 		     __u64 guest_cr3, __u32 isa),
618 	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
619 		    guest_tdp_pgd, guest_cr3, isa),
620 
621 	TP_STRUCT__entry(
622 		__field(	__u64,		rip		)
623 		__field(	__u64,		vmcb		)
624 		__field(	__u64,		nested_rip	)
625 		__field(	__u32,		int_ctl		)
626 		__field(	__u32,		event_inj	)
627 		__field(	bool,		tdp_enabled	)
628 		__field(	__u64,		guest_pgd	)
629 		__field(	__u32,		isa		)
630 	),
631 
632 	TP_fast_assign(
633 		__entry->rip		= rip;
634 		__entry->vmcb		= vmcb;
635 		__entry->nested_rip	= nested_rip;
636 		__entry->int_ctl	= int_ctl;
637 		__entry->event_inj	= event_inj;
638 		__entry->tdp_enabled	= tdp_enabled;
639 		__entry->guest_pgd	= tdp_enabled ? guest_tdp_pgd : guest_cr3;
640 		__entry->isa		= isa;
641 	),
642 
643 	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
644 		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
645 		  __entry->rip,
646 		  __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
647 		  __entry->vmcb,
648 		  __entry->nested_rip,
649 		  __entry->int_ctl,
650 		  __entry->event_inj,
651 		  __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
652 		  __entry->tdp_enabled ? "y" : "n",
653 		  !__entry->tdp_enabled ? "guest_cr3" :
654 		  __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
655 		  __entry->guest_pgd)
656 );
657 
658 TRACE_EVENT(kvm_nested_intercepts,
659 	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
660 		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
661 	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
662 		    intercept2, intercept3),
663 
664 	TP_STRUCT__entry(
665 		__field(	__u16,		cr_read		)
666 		__field(	__u16,		cr_write	)
667 		__field(	__u32,		exceptions	)
668 		__field(	__u32,		intercept1	)
669 		__field(	__u32,		intercept2	)
670 		__field(	__u32,		intercept3	)
671 	),
672 
673 	TP_fast_assign(
674 		__entry->cr_read	= cr_read;
675 		__entry->cr_write	= cr_write;
676 		__entry->exceptions	= exceptions;
677 		__entry->intercept1	= intercept1;
678 		__entry->intercept2	= intercept2;
679 		__entry->intercept3	= intercept3;
680 	),
681 
682 	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
683 		  "intercepts: %08x %08x %08x",
684 		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
685 		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
686 );
687 /*
688  * Tracepoint for #VMEXIT while nested
689  */
690 TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
691 
692 /*
693  * Tracepoint for #VMEXIT reinjected to the guest
694  */
695 TRACE_EVENT(kvm_nested_vmexit_inject,
696 	    TP_PROTO(__u32 exit_code,
697 		     __u64 exit_info1, __u64 exit_info2,
698 		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
699 	    TP_ARGS(exit_code, exit_info1, exit_info2,
700 		    exit_int_info, exit_int_info_err, isa),
701 
702 	TP_STRUCT__entry(
703 		__field(	__u32,		exit_code		)
704 		__field(	__u64,		exit_info1		)
705 		__field(	__u64,		exit_info2		)
706 		__field(	__u32,		exit_int_info		)
707 		__field(	__u32,		exit_int_info_err	)
708 		__field(	__u32,		isa			)
709 	),
710 
711 	TP_fast_assign(
712 		__entry->exit_code		= exit_code;
713 		__entry->exit_info1		= exit_info1;
714 		__entry->exit_info2		= exit_info2;
715 		__entry->exit_int_info		= exit_int_info;
716 		__entry->exit_int_info_err	= exit_int_info_err;
717 		__entry->isa			= isa;
718 	),
719 
720 	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
721 		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
722 		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
723 		  __entry->exit_info1, __entry->exit_info2,
724 		  __entry->exit_int_info, __entry->exit_int_info_err)
725 );
726 
727 /*
728  * Tracepoint for nested #vmexit because of interrupt pending
729  */
730 TRACE_EVENT(kvm_nested_intr_vmexit,
731 	    TP_PROTO(__u64 rip),
732 	    TP_ARGS(rip),
733 
734 	TP_STRUCT__entry(
735 		__field(	__u64,	rip	)
736 	),
737 
738 	TP_fast_assign(
739 		__entry->rip	=	rip;
740 	),
741 
742 	TP_printk("rip: 0x%016llx", __entry->rip)
743 );
744 
745 /*
746  * Tracepoint for INVLPGA emulation
747  */
748 TRACE_EVENT(kvm_invlpga,
749 	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
750 	    TP_ARGS(rip, asid, address),
751 
752 	TP_STRUCT__entry(
753 		__field(	__u64,		rip	)
754 		__field(	unsigned int,	asid	)
755 		__field(	__u64,		address	)
756 	),
757 
758 	TP_fast_assign(
759 		__entry->rip		=	rip;
760 		__entry->asid		=	asid;
761 		__entry->address	=	address;
762 	),
763 
764 	TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
765 		  __entry->rip, __entry->asid, __entry->address)
766 );
767 
768 /*
769  * Tracepoint for SKINIT emulation
770  */
771 TRACE_EVENT(kvm_skinit,
772 	    TP_PROTO(__u64 rip, __u32 slb),
773 	    TP_ARGS(rip, slb),
774 
775 	TP_STRUCT__entry(
776 		__field(	__u64,	rip	)
777 		__field(	__u32,	slb	)
778 	),
779 
780 	TP_fast_assign(
781 		__entry->rip		=	rip;
782 		__entry->slb		=	slb;
783 	),
784 
785 	TP_printk("rip: 0x%016llx slb: 0x%08x",
786 		  __entry->rip, __entry->slb)
787 );
788 
789 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
790 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
791 #define KVM_EMUL_INSN_F_CS_D   (1 << 2)
792 #define KVM_EMUL_INSN_F_CS_L   (1 << 3)
793 
794 #define kvm_trace_symbol_emul_flags	                  \
795 	{ 0,   			    "real" },		  \
796 	{ KVM_EMUL_INSN_F_CR0_PE			  \
797 	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
798 	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
799 	{ KVM_EMUL_INSN_F_CR0_PE			  \
800 	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
801 	{ KVM_EMUL_INSN_F_CR0_PE			  \
802 	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
803 
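/*
 * kei_decode_mode() folds the emulator mode into the KVM_EMUL_INSN_F_* bits
 * so kvm_emulate_insn can print "real"/"vm16"/"prot16"/"prot32"/"prot64" via
 * kvm_trace_symbol_emul_flags; an unrecognized mode leaves 0xff, which
 * __print_symbolic() falls back to printing as a raw hex value.
 */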
804 #define kei_decode_mode(mode) ({			\
805 	u8 flags = 0xff;				\
806 	switch (mode) {					\
807 	case X86EMUL_MODE_REAL:				\
808 		flags = 0;				\
809 		break;					\
810 	case X86EMUL_MODE_VM86:				\
811 		flags = KVM_EMUL_INSN_F_EFL_VM;		\
812 		break;					\
813 	case X86EMUL_MODE_PROT16:			\
814 		flags = KVM_EMUL_INSN_F_CR0_PE;		\
815 		break;					\
816 	case X86EMUL_MODE_PROT32:			\
817 		flags = KVM_EMUL_INSN_F_CR0_PE		\
818 			| KVM_EMUL_INSN_F_CS_D;		\
819 		break;					\
820 	case X86EMUL_MODE_PROT64:			\
821 		flags = KVM_EMUL_INSN_F_CR0_PE		\
822 			| KVM_EMUL_INSN_F_CS_L;		\
823 		break;					\
824 	}						\
825 	flags;						\
826 	})
827 
828 TRACE_EVENT(kvm_emulate_insn,
829 	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
830 	TP_ARGS(vcpu, failed),
831 
832 	TP_STRUCT__entry(
833 		__field(    __u64, rip                       )
834 		__field(    __u32, csbase                    )
835 		__field(    __u8,  len                       )
836 		__array(    __u8,  insn,    15	             )
837 		__field(    __u8,  flags       	   	     )
838 		__field(    __u8,  failed                    )
839 		),
840 
841 	TP_fast_assign(
842 		__entry->csbase = kvm_x86_call(get_segment_base)(vcpu,
843 								 VCPU_SREG_CS);
844 		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
845 			       - vcpu->arch.emulate_ctxt->fetch.data;
846 		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
847 		memcpy(__entry->insn,
848 		       vcpu->arch.emulate_ctxt->fetch.data,
849 		       15);
850 		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
851 		__entry->failed = failed;
852 		),
853 
854 	TP_printk("%x:%llx:%s (%s)%s",
855 		  __entry->csbase, __entry->rip,
856 		  __print_hex(__entry->insn, __entry->len),
857 		  __print_symbolic(__entry->flags,
858 				   kvm_trace_symbol_emul_flags),
859 		  __entry->failed ? " failed" : ""
860 		)
861 	);
862 
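/*
 * The _start wrapper records an emulation attempt, the _failed wrapper one
 * the emulator gave up on; TP_printk() above appends " failed" for the
 * latter.
 */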
863 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
864 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
865 
866 TRACE_EVENT(
867 	vcpu_match_mmio,
868 	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
869 	TP_ARGS(gva, gpa, write, gpa_match),
870 
871 	TP_STRUCT__entry(
872 		__field(gva_t, gva)
873 		__field(gpa_t, gpa)
874 		__field(bool, write)
875 		__field(bool, gpa_match)
876 		),
877 
878 	TP_fast_assign(
879 		__entry->gva = gva;
880 		__entry->gpa = gpa;
881 		__entry->write = write;
882 		__entry->gpa_match = gpa_match;
883 		),
884 
885 	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
886 		  __entry->write ? "Write" : "Read",
887 		  __entry->gpa_match ? "GPA" : "GVA")
888 );
889 
890 TRACE_EVENT(kvm_write_tsc_offset,
891 	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
892 		 __u64 next_tsc_offset),
893 	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
894 
895 	TP_STRUCT__entry(
896 		__field( unsigned int,	vcpu_id				)
897 		__field(	__u64,	previous_tsc_offset		)
898 		__field(	__u64,	next_tsc_offset			)
899 	),
900 
901 	TP_fast_assign(
902 		__entry->vcpu_id		= vcpu_id;
903 		__entry->previous_tsc_offset	= previous_tsc_offset;
904 		__entry->next_tsc_offset	= next_tsc_offset;
905 	),
906 
907 	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
908 		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
909 );
910 
911 #ifdef CONFIG_X86_64
912 
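/*
 * Value/name pairs for the hostclock field printed by
 * kvm_update_master_clock and kvm_track_tsc below.
 */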
913 #define host_clocks					\
914 	{VDSO_CLOCKMODE_NONE, "none"},			\
915 	{VDSO_CLOCKMODE_TSC,  "tsc"}
916 
917 TRACE_EVENT(kvm_update_master_clock,
918 	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
919 	TP_ARGS(use_master_clock, host_clock, offset_matched),
920 
921 	TP_STRUCT__entry(
922 		__field(		bool,	use_master_clock	)
923 		__field(	unsigned int,	host_clock		)
924 		__field(		bool,	offset_matched		)
925 	),
926 
927 	TP_fast_assign(
928 		__entry->use_master_clock	= use_master_clock;
929 		__entry->host_clock		= host_clock;
930 		__entry->offset_matched		= offset_matched;
931 	),
932 
933 	TP_printk("masterclock %d hostclock %s offsetmatched %u",
934 		  __entry->use_master_clock,
935 		  __print_symbolic(__entry->host_clock, host_clocks),
936 		  __entry->offset_matched)
937 );
938 
939 TRACE_EVENT(kvm_track_tsc,
940 	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
941 		 unsigned int online_vcpus, bool use_master_clock,
942 		 unsigned int host_clock),
943 	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
944 		host_clock),
945 
946 	TP_STRUCT__entry(
947 		__field(	unsigned int,	vcpu_id			)
948 		__field(	unsigned int,	nr_vcpus_matched_tsc	)
949 		__field(	unsigned int,	online_vcpus		)
950 		__field(	bool,		use_master_clock	)
951 		__field(	unsigned int,	host_clock		)
952 	),
953 
954 	TP_fast_assign(
955 		__entry->vcpu_id		= vcpu_id;
956 		__entry->nr_vcpus_matched_tsc	= nr_matched;
957 		__entry->online_vcpus		= online_vcpus;
958 		__entry->use_master_clock	= use_master_clock;
959 		__entry->host_clock		= host_clock;
960 	),
961 
962 	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
963 		  " hostclock %s",
964 		  __entry->vcpu_id, __entry->use_master_clock,
965 		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
966 		  __print_symbolic(__entry->host_clock, host_clocks))
967 );
968 
969 #endif /* CONFIG_X86_64 */
970 
971 /*
972  * Tracepoint for PML full VMEXIT.
973  */
974 TRACE_EVENT(kvm_pml_full,
975 	TP_PROTO(unsigned int vcpu_id),
976 	TP_ARGS(vcpu_id),
977 
978 	TP_STRUCT__entry(
979 		__field(	unsigned int,	vcpu_id			)
980 	),
981 
982 	TP_fast_assign(
983 		__entry->vcpu_id		= vcpu_id;
984 	),
985 
986 	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
987 );
988 
989 TRACE_EVENT(kvm_ple_window_update,
990 	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
991 	TP_ARGS(vcpu_id, new, old),
992 
993 	TP_STRUCT__entry(
994 		__field(        unsigned int,   vcpu_id         )
995 		__field(        unsigned int,       new         )
996 		__field(        unsigned int,       old         )
997 	),
998 
999 	TP_fast_assign(
1000 		__entry->vcpu_id        = vcpu_id;
1001 		__entry->new            = new;
1002 		__entry->old            = old;
1003 	),
1004 
1005 	TP_printk("vcpu %u old %u new %u (%s)",
1006 	          __entry->vcpu_id, __entry->old, __entry->new,
1007 		  __entry->old < __entry->new ? "grew" : "shrunk")
1008 );
1009 
1010 TRACE_EVENT(kvm_pvclock_update,
1011 	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
1012 	TP_ARGS(vcpu_id, pvclock),
1013 
1014 	TP_STRUCT__entry(
1015 		__field(	unsigned int,	vcpu_id			)
1016 		__field(	__u32,		version			)
1017 		__field(	__u64,		tsc_timestamp		)
1018 		__field(	__u64,		system_time		)
1019 		__field(	__u32,		tsc_to_system_mul	)
1020 		__field(	__s8,		tsc_shift		)
1021 		__field(	__u8,		flags			)
1022 	),
1023 
1024 	TP_fast_assign(
1025 		__entry->vcpu_id	   = vcpu_id;
1026 		__entry->version	   = pvclock->version;
1027 		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
1028 		__entry->system_time	   = pvclock->system_time;
1029 		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
1030 		__entry->tsc_shift	   = pvclock->tsc_shift;
1031 		__entry->flags		   = pvclock->flags;
1032 	),
1033 
1034 	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
1035 		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
1036 		  "flags 0x%x }",
1037 		  __entry->vcpu_id,
1038 		  __entry->version,
1039 		  __entry->tsc_timestamp,
1040 		  __entry->system_time,
1041 		  __entry->tsc_to_system_mul,
1042 		  __entry->tsc_shift,
1043 		  __entry->flags)
1044 );
1045 
1046 TRACE_EVENT(kvm_wait_lapic_expire,
1047 	TP_PROTO(unsigned int vcpu_id, s64 delta),
1048 	TP_ARGS(vcpu_id, delta),
1049 
1050 	TP_STRUCT__entry(
1051 		__field(	unsigned int,	vcpu_id		)
1052 		__field(	s64,		delta		)
1053 	),
1054 
1055 	TP_fast_assign(
1056 		__entry->vcpu_id	   = vcpu_id;
1057 		__entry->delta             = delta;
1058 	),
1059 
1060 	TP_printk("vcpu %u: delta %lld (%s)",
1061 		  __entry->vcpu_id,
1062 		  __entry->delta,
1063 		  __entry->delta < 0 ? "early" : "late")
1064 );
1065 
1066 TRACE_EVENT(kvm_smm_transition,
1067 	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
1068 	TP_ARGS(vcpu_id, smbase, entering),
1069 
1070 	TP_STRUCT__entry(
1071 		__field(	unsigned int,	vcpu_id		)
1072 		__field(	u64,		smbase		)
1073 		__field(	bool,		entering	)
1074 	),
1075 
1076 	TP_fast_assign(
1077 		__entry->vcpu_id	= vcpu_id;
1078 		__entry->smbase		= smbase;
1079 		__entry->entering	= entering;
1080 	),
1081 
1082 	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
1083 		  __entry->vcpu_id,
1084 		  __entry->entering ? "entering" : "leaving",
1085 		  __entry->smbase)
1086 );
1087 
1088 /*
1089  * Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC.
1090  */
1091 TRACE_EVENT(kvm_pi_irte_update,
1092 	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
1093 		 unsigned int gsi, unsigned int gvec,
1094 		 u64 pi_desc_addr, bool set),
1095 	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
1096 
1097 	TP_STRUCT__entry(
1098 		__field(	unsigned int,	host_irq	)
1099 		__field(	unsigned int,	vcpu_id		)
1100 		__field(	unsigned int,	gsi		)
1101 		__field(	unsigned int,	gvec		)
1102 		__field(	u64,		pi_desc_addr	)
1103 		__field(	bool,		set		)
1104 	),
1105 
1106 	TP_fast_assign(
1107 		__entry->host_irq	= host_irq;
1108 		__entry->vcpu_id	= vcpu_id;
1109 		__entry->gsi		= gsi;
1110 		__entry->gvec		= gvec;
1111 		__entry->pi_desc_addr	= pi_desc_addr;
1112 		__entry->set		= set;
1113 	),
1114 
1115 	TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
1116 		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
1117 		  __entry->set ? "enabled and being updated" : "disabled",
1118 		  __entry->host_irq,
1119 		  __entry->vcpu_id,
1120 		  __entry->gsi,
1121 		  __entry->gvec,
1122 		  __entry->pi_desc_addr)
1123 );
1124 
1125 /*
1126  * Tracepoint for kvm_hv_notify_acked_sint.
1127  */
1128 TRACE_EVENT(kvm_hv_notify_acked_sint,
1129 	TP_PROTO(int vcpu_id, u32 sint),
1130 	TP_ARGS(vcpu_id, sint),
1131 
1132 	TP_STRUCT__entry(
1133 		__field(int, vcpu_id)
1134 		__field(u32, sint)
1135 	),
1136 
1137 	TP_fast_assign(
1138 		__entry->vcpu_id = vcpu_id;
1139 		__entry->sint = sint;
1140 	),
1141 
1142 	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
1143 );
1144 
1145 /*
1146  * Tracepoint for synic_set_irq.
1147  */
1148 TRACE_EVENT(kvm_hv_synic_set_irq,
1149 	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
1150 	TP_ARGS(vcpu_id, sint, vector, ret),
1151 
1152 	TP_STRUCT__entry(
1153 		__field(int, vcpu_id)
1154 		__field(u32, sint)
1155 		__field(int, vector)
1156 		__field(int, ret)
1157 	),
1158 
1159 	TP_fast_assign(
1160 		__entry->vcpu_id = vcpu_id;
1161 		__entry->sint = sint;
1162 		__entry->vector = vector;
1163 		__entry->ret = ret;
1164 	),
1165 
1166 	TP_printk("vcpu_id %d sint %u vector %d ret %d",
1167 		  __entry->vcpu_id, __entry->sint, __entry->vector,
1168 		  __entry->ret)
1169 );
1170 
1171 /*
1172  * Tracepoint for kvm_hv_synic_send_eoi.
1173  */
1174 TRACE_EVENT(kvm_hv_synic_send_eoi,
1175 	TP_PROTO(int vcpu_id, int vector),
1176 	TP_ARGS(vcpu_id, vector),
1177 
1178 	TP_STRUCT__entry(
1179 		__field(int, vcpu_id)
1181 		__field(int, vector)
1183 	),
1184 
1185 	TP_fast_assign(
1186 		__entry->vcpu_id = vcpu_id;
1187 		__entry->vector	= vector;
1188 	),
1189 
1190 	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
1191 );
1192 
1193 /*
1194  * Tracepoint for synic_set_msr.
1195  */
1196 TRACE_EVENT(kvm_hv_synic_set_msr,
1197 	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
1198 	TP_ARGS(vcpu_id, msr, data, host),
1199 
1200 	TP_STRUCT__entry(
1201 		__field(int, vcpu_id)
1202 		__field(u32, msr)
1203 		__field(u64, data)
1204 		__field(bool, host)
1205 	),
1206 
1207 	TP_fast_assign(
1208 		__entry->vcpu_id = vcpu_id;
1209 		__entry->msr = msr;
1210 		__entry->data = data;
1211 		__entry->host = host;
1212 	),
1213 
1214 	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
1215 		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
1216 );
1217 
1218 /*
1219  * Tracepoint for stimer_set_config.
1220  */
1221 TRACE_EVENT(kvm_hv_stimer_set_config,
1222 	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
1223 	TP_ARGS(vcpu_id, timer_index, config, host),
1224 
1225 	TP_STRUCT__entry(
1226 		__field(int, vcpu_id)
1227 		__field(int, timer_index)
1228 		__field(u64, config)
1229 		__field(bool, host)
1230 	),
1231 
1232 	TP_fast_assign(
1233 		__entry->vcpu_id = vcpu_id;
1234 		__entry->timer_index = timer_index;
1235 		__entry->config = config;
1236 		__entry->host = host;
1237 	),
1238 
1239 	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
1240 		  __entry->vcpu_id, __entry->timer_index, __entry->config,
1241 		  __entry->host)
1242 );
1243 
1244 /*
1245  * Tracepoint for stimer_set_count.
1246  */
1247 TRACE_EVENT(kvm_hv_stimer_set_count,
1248 	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
1249 	TP_ARGS(vcpu_id, timer_index, count, host),
1250 
1251 	TP_STRUCT__entry(
1252 		__field(int, vcpu_id)
1253 		__field(int, timer_index)
1254 		__field(u64, count)
1255 		__field(bool, host)
1256 	),
1257 
1258 	TP_fast_assign(
1259 		__entry->vcpu_id = vcpu_id;
1260 		__entry->timer_index = timer_index;
1261 		__entry->count = count;
1262 		__entry->host = host;
1263 	),
1264 
1265 	TP_printk("vcpu_id %d timer %d count %llu host %d",
1266 		  __entry->vcpu_id, __entry->timer_index, __entry->count,
1267 		  __entry->host)
1268 );
1269 
1270 /*
1271  * Tracepoint for stimer_start(periodic timer case).
1272  */
1273 TRACE_EVENT(kvm_hv_stimer_start_periodic,
1274 	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
1275 	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
1276 
1277 	TP_STRUCT__entry(
1278 		__field(int, vcpu_id)
1279 		__field(int, timer_index)
1280 		__field(u64, time_now)
1281 		__field(u64, exp_time)
1282 	),
1283 
1284 	TP_fast_assign(
1285 		__entry->vcpu_id = vcpu_id;
1286 		__entry->timer_index = timer_index;
1287 		__entry->time_now = time_now;
1288 		__entry->exp_time = exp_time;
1289 	),
1290 
1291 	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
1292 		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1293 		  __entry->exp_time)
1294 );
1295 
1296 /*
1297  * Tracepoint for stimer_start(one-shot timer case).
1298  */
1299 TRACE_EVENT(kvm_hv_stimer_start_one_shot,
1300 	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
1301 	TP_ARGS(vcpu_id, timer_index, time_now, count),
1302 
1303 	TP_STRUCT__entry(
1304 		__field(int, vcpu_id)
1305 		__field(int, timer_index)
1306 		__field(u64, time_now)
1307 		__field(u64, count)
1308 	),
1309 
1310 	TP_fast_assign(
1311 		__entry->vcpu_id = vcpu_id;
1312 		__entry->timer_index = timer_index;
1313 		__entry->time_now = time_now;
1314 		__entry->count = count;
1315 	),
1316 
1317 	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
1318 		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1319 		  __entry->count)
1320 );
1321 
1322 /*
1323  * Tracepoint for stimer_timer_callback.
1324  */
1325 TRACE_EVENT(kvm_hv_stimer_callback,
1326 	TP_PROTO(int vcpu_id, int timer_index),
1327 	TP_ARGS(vcpu_id, timer_index),
1328 
1329 	TP_STRUCT__entry(
1330 		__field(int, vcpu_id)
1331 		__field(int, timer_index)
1332 	),
1333 
1334 	TP_fast_assign(
1335 		__entry->vcpu_id = vcpu_id;
1336 		__entry->timer_index = timer_index;
1337 	),
1338 
1339 	TP_printk("vcpu_id %d timer %d",
1340 		  __entry->vcpu_id, __entry->timer_index)
1341 );
1342 
1343 /*
1344  * Tracepoint for stimer_expiration.
1345  */
1346 TRACE_EVENT(kvm_hv_stimer_expiration,
1347 	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
1348 	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
1349 
1350 	TP_STRUCT__entry(
1351 		__field(int, vcpu_id)
1352 		__field(int, timer_index)
1353 		__field(int, direct)
1354 		__field(int, msg_send_result)
1355 	),
1356 
1357 	TP_fast_assign(
1358 		__entry->vcpu_id = vcpu_id;
1359 		__entry->timer_index = timer_index;
1360 		__entry->direct = direct;
1361 		__entry->msg_send_result = msg_send_result;
1362 	),
1363 
1364 	TP_printk("vcpu_id %d timer %d direct %d send result %d",
1365 		  __entry->vcpu_id, __entry->timer_index,
1366 		  __entry->direct, __entry->msg_send_result)
1367 );
1368 
1369 /*
1370  * Tracepoint for stimer_cleanup.
1371  */
1372 TRACE_EVENT(kvm_hv_stimer_cleanup,
1373 	TP_PROTO(int vcpu_id, int timer_index),
1374 	TP_ARGS(vcpu_id, timer_index),
1375 
1376 	TP_STRUCT__entry(
1377 		__field(int, vcpu_id)
1378 		__field(int, timer_index)
1379 	),
1380 
1381 	TP_fast_assign(
1382 		__entry->vcpu_id = vcpu_id;
1383 		__entry->timer_index = timer_index;
1384 	),
1385 
1386 	TP_printk("vcpu_id %d timer %d",
1387 		  __entry->vcpu_id, __entry->timer_index)
1388 );
1389 
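/*
 * Expands to three printk arguments (mask, separator, decoded flag names),
 * matching the "0x%lx%s%s" portion of kvm_apicv_inhibit_changed's format.
 */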
1390 #define kvm_print_apicv_inhibit_reasons(inhibits)	\
1391 	(inhibits), (inhibits) ? " " : "",		\
1392 	(inhibits) ? __print_flags(inhibits, "|", APICV_INHIBIT_REASONS) : ""
1393 
1394 TRACE_EVENT(kvm_apicv_inhibit_changed,
1395 	    TP_PROTO(int reason, bool set, unsigned long inhibits),
1396 	    TP_ARGS(reason, set, inhibits),
1397 
1398 	TP_STRUCT__entry(
1399 		__field(int, reason)
1400 		__field(bool, set)
1401 		__field(unsigned long, inhibits)
1402 	),
1403 
1404 	TP_fast_assign(
1405 		__entry->reason = reason;
1406 		__entry->set = set;
1407 		__entry->inhibits = inhibits;
1408 	),
1409 
1410 	TP_printk("%s reason=%u, inhibits=0x%lx%s%s",
1411 		  __entry->set ? "set" : "cleared",
1412 		  __entry->reason,
1413 		  kvm_print_apicv_inhibit_reasons(__entry->inhibits))
1414 );
1415 
1416 TRACE_EVENT(kvm_apicv_accept_irq,
1417 	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
1418 	    TP_ARGS(apicid, dm, tm, vec),
1419 
1420 	TP_STRUCT__entry(
1421 		__field(	__u32,		apicid		)
1422 		__field(	__u16,		dm		)
1423 		__field(	__u16,		tm		)
1424 		__field(	__u8,		vec		)
1425 	),
1426 
1427 	TP_fast_assign(
1428 		__entry->apicid		= apicid;
1429 		__entry->dm		= dm;
1430 		__entry->tm		= tm;
1431 		__entry->vec		= vec;
1432 	),
1433 
1434 	TP_printk("apicid %x vec %u (%s|%s)",
1435 		  __entry->apicid, __entry->vec,
1436 		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
1437 		  __entry->tm ? "level" : "edge")
1438 );
1439 
1440 /*
1441  * Tracepoint for AMD AVIC
1442  */
1443 TRACE_EVENT(kvm_avic_incomplete_ipi,
1444 	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
1445 	    TP_ARGS(vcpu, icrh, icrl, id, index),
1446 
1447 	TP_STRUCT__entry(
1448 		__field(u32, vcpu)
1449 		__field(u32, icrh)
1450 		__field(u32, icrl)
1451 		__field(u32, id)
1452 		__field(u32, index)
1453 	),
1454 
1455 	TP_fast_assign(
1456 		__entry->vcpu = vcpu;
1457 		__entry->icrh = icrh;
1458 		__entry->icrl = icrl;
1459 		__entry->id = id;
1460 		__entry->index = index;
1461 	),
1462 
1463 	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
1464 		  __entry->vcpu, __entry->icrh, __entry->icrl,
1465 		  __entry->id, __entry->index)
1466 );
1467 
1468 TRACE_EVENT(kvm_avic_unaccelerated_access,
1469 	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
1470 	    TP_ARGS(vcpu, offset, ft, rw, vec),
1471 
1472 	TP_STRUCT__entry(
1473 		__field(u32, vcpu)
1474 		__field(u32, offset)
1475 		__field(bool, ft)
1476 		__field(bool, rw)
1477 		__field(u32, vec)
1478 	),
1479 
1480 	TP_fast_assign(
1481 		__entry->vcpu = vcpu;
1482 		__entry->offset = offset;
1483 		__entry->ft = ft;
1484 		__entry->rw = rw;
1485 		__entry->vec = vec;
1486 	),
1487 
1488 	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
1489 		  __entry->vcpu,
1490 		  __entry->offset,
1491 		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
1492 		  __entry->ft ? "trap" : "fault",
1493 		  __entry->rw ? "write" : "read",
1494 		  __entry->vec)
1495 );
1496 
1497 TRACE_EVENT(kvm_avic_ga_log,
1498 	    TP_PROTO(u32 vmid, u32 vcpuid),
1499 	    TP_ARGS(vmid, vcpuid),
1500 
1501 	TP_STRUCT__entry(
1502 		__field(u32, vmid)
1503 		__field(u32, vcpuid)
1504 	),
1505 
1506 	TP_fast_assign(
1507 		__entry->vmid = vmid;
1508 		__entry->vcpuid = vcpuid;
1509 	),
1510 
1511 	TP_printk("vmid=%u, vcpuid=%u",
1512 		  __entry->vmid, __entry->vcpuid)
1513 );
1514 
1515 TRACE_EVENT(kvm_avic_kick_vcpu_slowpath,
1516 	    TP_PROTO(u32 icrh, u32 icrl, u32 index),
1517 	    TP_ARGS(icrh, icrl, index),
1518 
1519 	TP_STRUCT__entry(
1520 		__field(u32, icrh)
1521 		__field(u32, icrl)
1522 		__field(u32, index)
1523 	),
1524 
1525 	TP_fast_assign(
1526 		__entry->icrh = icrh;
1527 		__entry->icrl = icrl;
1528 		__entry->index = index;
1529 	),
1530 
1531 	TP_printk("icrh:icrl=%#010x:%08x, index=%u",
1532 		  __entry->icrh, __entry->icrl, __entry->index)
1533 );
1534 
1535 TRACE_EVENT(kvm_avic_doorbell,
1536 	    TP_PROTO(u32 vcpuid, u32 apicid),
1537 	    TP_ARGS(vcpuid, apicid),
1538 
1539 	TP_STRUCT__entry(
1540 		__field(u32, vcpuid)
1541 		__field(u32, apicid)
1542 	),
1543 
1544 	TP_fast_assign(
1545 		__entry->vcpuid = vcpuid;
1546 		__entry->apicid = apicid;
1547 	),
1548 
1549 	TP_printk("vcpuid=%u, apicid=%u",
1550 		  __entry->vcpuid, __entry->apicid)
1551 );
1552 
1553 TRACE_EVENT(kvm_hv_timer_state,
1554 		TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
1555 		TP_ARGS(vcpu_id, hv_timer_in_use),
1556 		TP_STRUCT__entry(
1557 			__field(unsigned int, vcpu_id)
1558 			__field(unsigned int, hv_timer_in_use)
1559 			),
1560 		TP_fast_assign(
1561 			__entry->vcpu_id = vcpu_id;
1562 			__entry->hv_timer_in_use = hv_timer_in_use;
1563 			),
1564 		TP_printk("vcpu_id %x hv_timer %x",
1565 			__entry->vcpu_id,
1566 			__entry->hv_timer_in_use)
1567 );
1568 
1569 /*
1570  * Tracepoint for kvm_hv_flush_tlb.
1571  */
1572 TRACE_EVENT(kvm_hv_flush_tlb,
1573 	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode),
1574 	TP_ARGS(processor_mask, address_space, flags, guest_mode),
1575 
1576 	TP_STRUCT__entry(
1577 		__field(u64, processor_mask)
1578 		__field(u64, address_space)
1579 		__field(u64, flags)
1580 		__field(bool, guest_mode)
1581 	),
1582 
1583 	TP_fast_assign(
1584 		__entry->processor_mask = processor_mask;
1585 		__entry->address_space = address_space;
1586 		__entry->flags = flags;
1587 		__entry->guest_mode = guest_mode;
1588 	),
1589 
1590 	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
1591 		  __entry->processor_mask, __entry->address_space,
1592 		  __entry->flags, __entry->guest_mode ? "(L2)" : "")
1593 );
1594 
1595 /*
1596  * Tracepoint for kvm_hv_flush_tlb_ex.
1597  */
1598 TRACE_EVENT(kvm_hv_flush_tlb_ex,
1599 	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode),
1600 	TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode),
1601 
1602 	TP_STRUCT__entry(
1603 		__field(u64, valid_bank_mask)
1604 		__field(u64, format)
1605 		__field(u64, address_space)
1606 		__field(u64, flags)
1607 		__field(bool, guest_mode)
1608 	),
1609 
1610 	TP_fast_assign(
1611 		__entry->valid_bank_mask = valid_bank_mask;
1612 		__entry->format = format;
1613 		__entry->address_space = address_space;
1614 		__entry->flags = flags;
1615 		__entry->guest_mode = guest_mode;
1616 	),
1617 
1618 	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1619 		  "address_space 0x%llx flags 0x%llx %s",
1620 		  __entry->valid_bank_mask, __entry->format,
1621 		  __entry->address_space, __entry->flags,
1622 		  __entry->guest_mode ? "(L2)" : "")
1623 );
1624 
1625 /*
1626  * Tracepoints for kvm_hv_send_ipi.
1627  */
1628 TRACE_EVENT(kvm_hv_send_ipi,
1629 	TP_PROTO(u32 vector, u64 processor_mask),
1630 	TP_ARGS(vector, processor_mask),
1631 
1632 	TP_STRUCT__entry(
1633 		__field(u32, vector)
1634 		__field(u64, processor_mask)
1635 	),
1636 
1637 	TP_fast_assign(
1638 		__entry->vector = vector;
1639 		__entry->processor_mask = processor_mask;
1640 	),
1641 
1642 	TP_printk("vector %x processor_mask 0x%llx",
1643 		  __entry->vector, __entry->processor_mask)
1644 );
1645 
1646 TRACE_EVENT(kvm_hv_send_ipi_ex,
1647 	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
1648 	TP_ARGS(vector, format, valid_bank_mask),
1649 
1650 	TP_STRUCT__entry(
1651 		__field(u32, vector)
1652 		__field(u64, format)
1653 		__field(u64, valid_bank_mask)
1654 	),
1655 
1656 	TP_fast_assign(
1657 		__entry->vector = vector;
1658 		__entry->format = format;
1659 		__entry->valid_bank_mask = valid_bank_mask;
1660 	),
1661 
1662 	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
1663 		  __entry->vector, __entry->format,
1664 		  __entry->valid_bank_mask)
1665 );
1666 
1667 TRACE_EVENT(kvm_pv_tlb_flush,
1668 	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
1669 	TP_ARGS(vcpu_id, need_flush_tlb),
1670 
1671 	TP_STRUCT__entry(
1672 		__field(	unsigned int,	vcpu_id		)
1673 		__field(	bool,	need_flush_tlb		)
1674 	),
1675 
1676 	TP_fast_assign(
1677 		__entry->vcpu_id	= vcpu_id;
1678 		__entry->need_flush_tlb = need_flush_tlb;
1679 	),
1680 
1681 	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
1682 		__entry->need_flush_tlb ? "true" : "false")
1683 );
1684 
1685 /*
1686  * Tracepoint for failed nested VMX VM-Enter.
1687  */
1688 TRACE_EVENT(kvm_nested_vmenter_failed,
1689 	TP_PROTO(const char *msg, u32 err),
1690 	TP_ARGS(msg, err),
1691 
1692 	TP_STRUCT__entry(
1693 		__string(msg, msg)
1694 		__field(u32, err)
1695 	),
1696 
1697 	TP_fast_assign(
1698 		__assign_str(msg);
1699 		__entry->err = err;
1700 	),
1701 
1702 	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
1703 		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
1704 );
1705 
1706 /*
1707  * Tracepoint for syndbg_set_msr.
1708  */
1709 TRACE_EVENT(kvm_hv_syndbg_set_msr,
1710 	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1711 	TP_ARGS(vcpu_id, vp_index, msr, data),
1712 
1713 	TP_STRUCT__entry(
1714 		__field(int, vcpu_id)
1715 		__field(u32, vp_index)
1716 		__field(u32, msr)
1717 		__field(u64, data)
1718 	),
1719 
1720 	TP_fast_assign(
1721 		__entry->vcpu_id = vcpu_id;
1722 		__entry->vp_index = vp_index;
1723 		__entry->msr = msr;
1724 		__entry->data = data;
1725 	),
1726 
1727 	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1728 		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1729 		  __entry->data)
1730 );
1731 
1732 /*
1733  * Tracepoint for syndbg_get_msr.
1734  */
1735 TRACE_EVENT(kvm_hv_syndbg_get_msr,
1736 	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1737 	TP_ARGS(vcpu_id, vp_index, msr, data),
1738 
1739 	TP_STRUCT__entry(
1740 		__field(int, vcpu_id)
1741 		__field(u32, vp_index)
1742 		__field(u32, msr)
1743 		__field(u64, data)
1744 	),
1745 
1746 	TP_fast_assign(
1747 		__entry->vcpu_id = vcpu_id;
1748 		__entry->vp_index = vp_index;
1749 		__entry->msr = msr;
1750 		__entry->data = data;
1751 	),
1752 
1753 	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1754 		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1755 		  __entry->data)
1756 );
1757 
1758 /*
1759  * Tracepoint for the start of VMGEXIT processing
1760  */
1761 TRACE_EVENT(kvm_vmgexit_enter,
1762 	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1763 	TP_ARGS(vcpu_id, ghcb),
1764 
1765 	TP_STRUCT__entry(
1766 		__field(unsigned int, vcpu_id)
1767 		__field(u64, exit_reason)
1768 		__field(u64, info1)
1769 		__field(u64, info2)
1770 	),
1771 
1772 	TP_fast_assign(
1773 		__entry->vcpu_id     = vcpu_id;
1774 		__entry->exit_reason = ghcb->save.sw_exit_code;
1775 		__entry->info1       = ghcb->save.sw_exit_info_1;
1776 		__entry->info2       = ghcb->save.sw_exit_info_2;
1777 	),
1778 
1779 	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1780 		  __entry->vcpu_id, __entry->exit_reason,
1781 		  __entry->info1, __entry->info2)
1782 );
1783 
1784 /*
1785  * Tracepoint for the end of VMGEXIT processing
1786  */
1787 TRACE_EVENT(kvm_vmgexit_exit,
1788 	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1789 	TP_ARGS(vcpu_id, ghcb),
1790 
1791 	TP_STRUCT__entry(
1792 		__field(unsigned int, vcpu_id)
1793 		__field(u64, exit_reason)
1794 		__field(u64, info1)
1795 		__field(u64, info2)
1796 	),
1797 
1798 	TP_fast_assign(
1799 		__entry->vcpu_id     = vcpu_id;
1800 		__entry->exit_reason = ghcb->save.sw_exit_code;
1801 		__entry->info1       = ghcb->save.sw_exit_info_1;
1802 		__entry->info2       = ghcb->save.sw_exit_info_2;
1803 	),
1804 
1805 	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1806 		  __entry->vcpu_id, __entry->exit_reason,
1807 		  __entry->info1, __entry->info2)
1808 );
1809 
1810 /*
1811  * Tracepoint for the start of VMGEXIT MSR protocol processing
1812  */
1813 TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
1814 	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
1815 	TP_ARGS(vcpu_id, ghcb_gpa),
1816 
1817 	TP_STRUCT__entry(
1818 		__field(unsigned int, vcpu_id)
1819 		__field(u64, ghcb_gpa)
1820 	),
1821 
1822 	TP_fast_assign(
1823 		__entry->vcpu_id  = vcpu_id;
1824 		__entry->ghcb_gpa = ghcb_gpa;
1825 	),
1826 
1827 	TP_printk("vcpu %u, ghcb_gpa %016llx",
1828 		  __entry->vcpu_id, __entry->ghcb_gpa)
1829 );
1830 
1831 /*
1832  * Tracepoint for the end of VMGEXIT MSR protocol processing
1833  */
1834 TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
1835 	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
1836 	TP_ARGS(vcpu_id, ghcb_gpa, result),
1837 
1838 	TP_STRUCT__entry(
1839 		__field(unsigned int, vcpu_id)
1840 		__field(u64, ghcb_gpa)
1841 		__field(int, result)
1842 	),
1843 
1844 	TP_fast_assign(
1845 		__entry->vcpu_id  = vcpu_id;
1846 		__entry->ghcb_gpa = ghcb_gpa;
1847 		__entry->result   = result;
1848 	),
1849 
1850 	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
1851 		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
1852 );
1853 
1854 /*
1855  * Tracepoint for #NPFs due to RMP faults.
1856  */
1857 TRACE_EVENT(kvm_rmp_fault,
1858 	TP_PROTO(struct kvm_vcpu *vcpu, u64 gpa, u64 pfn, u64 error_code,
1859 		 int rmp_level, int psmash_ret),
1860 	TP_ARGS(vcpu, gpa, pfn, error_code, rmp_level, psmash_ret),
1861 
1862 	TP_STRUCT__entry(
1863 		__field(unsigned int, vcpu_id)
1864 		__field(u64, gpa)
1865 		__field(u64, pfn)
1866 		__field(u64, error_code)
1867 		__field(int, rmp_level)
1868 		__field(int, psmash_ret)
1869 	),
1870 
1871 	TP_fast_assign(
1872 		__entry->vcpu_id	= vcpu->vcpu_id;
1873 		__entry->gpa		= gpa;
1874 		__entry->pfn		= pfn;
1875 		__entry->error_code	= error_code;
1876 		__entry->rmp_level	= rmp_level;
1877 		__entry->psmash_ret	= psmash_ret;
1878 	),
1879 
1880 	TP_printk("vcpu %u gpa %016llx pfn 0x%llx error_code 0x%llx rmp_level %d psmash_ret %d",
1881 		  __entry->vcpu_id, __entry->gpa, __entry->pfn,
1882 		  __entry->error_code, __entry->rmp_level, __entry->psmash_ret)
1883 );
1884 
1885 #endif /* _TRACE_KVM_H */
1886 
1887 #undef TRACE_INCLUDE_PATH
1888 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1889 #undef TRACE_INCLUDE_FILE
1890 #define TRACE_INCLUDE_FILE trace
1891 
1892 /* This part must be outside protection */
1893 #include <trace/define_trace.h>
1894