// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

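/*
 * Flush G-stage (guest-physical) TLB entries for a given VMID and GPA
 * range on the local hart. When the range spans more than PTRS_PER_PTE
 * entries, a full flush for the VMID is cheaper, so fall back to that.
 * With the Svinval extension, use the SFENCE.W.INVAL / HINVAL.GVMA /
 * SFENCE.INVAL.IR sequence instead of one HFENCE.GVMA per page.
 */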
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

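/* Flush all G-stage TLB entries for the given VMID on the local hart. */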
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

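/*
 * Flush G-stage TLB entries for a GPA range on the local hart,
 * irrespective of VMID (rs2 is the zero register).
 */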
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

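/* Flush all G-stage TLB entries for all VMIDs on the local hart. */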
void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

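/*
 * Flush VS-stage (guest-virtual) TLB entries for a given VMID, ASID and
 * GVA range on the local hart. HFENCE.VVMA/HINVAL.VVMA act on the VMID
 * currently programmed in hgatp, so temporarily swap in the target VMID
 * and restore the old hgatp value afterwards.
 */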
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

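/*
 * Flush all VS-stage TLB entries for the given VMID and ASID on the
 * local hart.
 */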
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}

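/*
 * Flush VS-stage TLB entries for a GVA range of the given VMID on the
 * local hart, irrespective of ASID.
 */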
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

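/* Flush all VS-stage TLB entries for the given VMID on the local hart. */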
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}

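/*
 * Sanitize the local TLB when this VCPU runs on a host CPU other than
 * the one it last exited on; see the comment below for why.
 */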
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the
	 * same VMID for all VCPUs of a particular Guest/VM. This means
	 * we might have stale G-stage TLB entries on the current Host
	 * CPU due to some other VCPU of the same Guest which ran
	 * previously on the current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for
	 * a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

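/*
 * Process a queued FENCE.I request on the current host CPU: account the
 * SBI PMU firmware event and flush the local instruction cache.
 */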
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}

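/*
 * Process an HFENCE_GVMA_VMID_ALL request: flush all G-stage mappings
 * for this VM's VMID, either through the SBI nested acceleration (NACL)
 * shared memory interface when available or directly on the local hart.
 */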
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
	unsigned long vmid = READ_ONCE(v->vmid);

	if (kvm_riscv_nacl_available())
		nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
	else
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

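/* Same as above, but flushes all VS-stage mappings of this VM's VMID. */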
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
	unsigned long vmid = READ_ONCE(v->vmid);

	if (kvm_riscv_nacl_available())
		nacl_hfence_vvma_all(nacl_shmem(), vmid);
	else
		kvm_riscv_local_hfence_vvma_all(vmid);
}

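/*
 * Per-VCPU hfence requests live in a small fixed-size ring buffer
 * (hfence_queue) protected by hfence_lock. A zero 'type' marks a free
 * slot: dequeue returns false when the queue is empty and enqueue
 * returns false when it is full.
 */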
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

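/*
 * Drain this VCPU's hfence queue, issuing each queued flush either via
 * the NACL shared memory interface or directly on the local hart.
 */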
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
			else
				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			vmid = READ_ONCE(v->vmid);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma(nacl_shmem(), vmid,
						 d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
								d.size, d.order);
			break;
		default:
			break;
		}
	}
}

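/*
 * Build a mask of target VCPUs from (hbase, hmask), enqueue the hfence
 * data (if any) on each of them, and send the KVM request. If any
 * target VCPU's hfence queue is full, the more conservative fallback
 * request is sent instead. hbase == -1UL selects all VCPUs.
 */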
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

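/* Request a FENCE.I on the set of VCPUs selected by (hbase, hmask). */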
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

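/*
 * Request a G-stage flush of the GPA range [gpa, gpa + gpsz) for this
 * VM's VMID on the selected VCPUs. If a VCPU hfence queue is full, fall
 * back to flushing all G-stage mappings of the VMID.
 */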
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

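/* Request a flush of all G-stage mappings of this VM's VMID on the selected VCPUs. */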
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

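/*
 * Request a VS-stage flush of the GVA range for the given ASID on the
 * selected VCPUs, falling back to a full VS-stage flush when a VCPU
 * hfence queue is full.
 */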
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

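/* Request a flush of all VS-stage mappings for the given ASID on the selected VCPUs. */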
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

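/* Request a VS-stage flush of the GVA range, across all ASIDs, on the selected VCPUs. */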
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

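/* Request a flush of all VS-stage mappings of this VM's VMID on the selected VCPUs. */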
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}