/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H

#include <linux/bits.h>
#include <linux/stringify.h>
#include <linux/types.h>
#include <uapi/asm/kvm_para.h>

/*
 * Hypercall code field
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
#define HYPERCALL_ENCODE(vendor, code)	(((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE		0
#define KVM_HCALL_CODE_SWDBG		1
#define KVM_HCALL_CODE_USER_SERVICE	2

#define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define  KVM_HCALL_FUNC_IPI		1
#define  KVM_HCALL_FUNC_NOTIFY		2

#define KVM_HCALL_SWDBG			HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)

#define KVM_HCALL_USER_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
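
/*
 * With the definitions above, the hvcl immediates work out to
 * KVM_HCALL_SERVICE = (1 << 8) + 0 = 0x100, KVM_HCALL_SWDBG = 0x101 and
 * KVM_HCALL_USER_SERVICE = 0x102: the vendor id sits above bit 8 and the
 * per-vendor call number occupies the low byte.
 */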

/*
 * LoongArch hypercall return code
 */
#define KVM_HCALL_SUCCESS		0
#define KVM_HCALL_INVALID_CODE		-1UL
#define KVM_HCALL_INVALID_PARAMETER	-2UL

#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)
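
/*
 * The two macros above describe how the guest advertises its steal-time
 * area as a single 64-bit value: bit 0 is a valid/enable bit and bits 63:6
 * carry the physical address, which implies the area must be 64-byte
 * aligned.  A guest-side sketch (illustrative only; steal_time stands for
 * the guest's own struct kvm_steal_time instance):
 *
 *	unsigned long val;
 *
 *	val  = __pa(&steal_time) & KVM_STEAL_PHYS_MASK;
 *	val |= KVM_STEAL_PHYS_VALID;
 */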

struct kvm_steal_time {
	__u64 steal;		/* time in which this vCPU was not running */
	__u32 version;		/* incremented around each update of @steal */
	__u32 flags;
	__u32 pad[12];
};
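
/*
 * Guest-side read sketch (illustrative only, not part of this header):
 * @version is odd while the hypervisor is updating @steal, so retry until a
 * stable, even value is observed.  Here st points at the guest's mapped
 * struct kvm_steal_time, and READ_ONCE()/smp_rmb() come from the usual
 * kernel headers.
 *
 *	u32 version;
 *	u64 steal;
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		smp_rmb();
 *		steal = READ_ONCE(st->steal);
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */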

/*
 * Hypercall interface for the KVM hypervisor
 *
 * a0: function identifier
 * a1-a5: up to five arguments
 * The return value is placed in a0.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall3(u64 fid,
	unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;
	register unsigned long a5  asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
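
/*
 * Usage sketch (illustrative only; the exact argument layout of each
 * KVM_HCALL_FUNC_* service is defined by the hypervisor, not by this
 * header).  A guest passes the function identifier as @fid and checks a0
 * against the return codes above, e.g. for a hypothetical one-argument
 * notification:
 *
 *	if (kvm_hypercall1(KVM_HCALL_FUNC_NOTIFY, data) != KVM_HCALL_SUCCESS)
 *		pr_warn("KVM notify hypercall failed\n");
 */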

#ifdef CONFIG_PARAVIRT
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
#else
static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
#endif

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
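
/*
 * Typical guest-side probe (illustrative sketch; KVM_FEATURE_FOO and the
 * enable_pv_feature() helper are placeholders, not definitions from this
 * header or its uapi counterpart):
 *
 *	if (kvm_para_available() &&
 *	    (kvm_arch_para_features() & BIT(KVM_FEATURE_FOO)))
 *		enable_pv_feature();
 */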

#endif /* _ASM_LOONGARCH_KVM_PARA_H */