// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

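/*
 * When an extension is compiled out, install a placeholder entry with
 * an invalid extension ID range and no handler so that the sbi_ext[]
 * table below stays fully populated.
 */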
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

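/*
 * Maps a user-visible extension index (KVM_RISCV_SBI_EXT_*) to the
 * in-kernel implementation of that SBI extension.
 */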
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
		.ext_ptr = &vcpu_sbi_ext_susp,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_STA,
		.ext_ptr = &vcpu_sbi_ext_sta,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

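/* Look up the table entry for a user-visible extension index. */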
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;

	if (idx >= KVM_RISCV_SBI_EXT_MAX)
		return NULL;

	for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == idx) {
			sext = &sbi_ext[i];
			break;
		}
	}

	return sext;
}

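/*
 * An extension is supported if it has a table entry and was not marked
 * unavailable when the vCPU was initialized (e.g. because its probe
 * callback failed).
 */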
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

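/*
 * Forward an SBI call to userspace via a KVM_EXIT_RISCV_SBI exit.
 * Userspace is expected to fill in run->riscv_sbi.ret[] before the
 * next KVM_RUN; the default of SBI_ERR_NOT_SUPPORTED is what the
 * guest sees if userspace leaves it untouched.
 */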
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}

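/*
 * Stop all vCPUs of the VM and report a system event of the given
 * type and reason to userspace, which performs the actual shutdown
 * or reset.
 */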
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

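/*
 * Complete an SBI call that was forwarded to userspace: copy the
 * return values from the run struct into a0/a1 and skip the ecall
 * instruction. The return_handled flag guarantees this runs at most
 * once per forwarded call.
 */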
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

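/* Enable or disable a single extension; reg_val must be 0 or 1. */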
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_STATUS_ENABLED :
			KVM_RISCV_SBI_EXT_STATUS_DISABLED;

	return 0;
}

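/* Report whether a single extension is currently enabled. */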
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_STATUS_ENABLED;

	return 0;
}

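/*
 * Enable or disable one extension per set bit in reg_val, where
 * reg_num selects a BITS_PER_LONG-sized window of extension IDs.
 */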
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

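/* Build a bitmask of enabled extensions for the selected register window. */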
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

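/*
 * Handle KVM_SET_ONE_REG for the SBI extension register space. A rough
 * sketch of the userspace side, assuming an rv64 host and with vcpu_fd
 * setup and error handling omitted (illustrative only):
 *
 *	uint64_t val = 1;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *		      KVM_RISCV_SBI_EXT_TIME,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * Extensions can only be reconfigured before the vCPU first runs;
 * afterwards this returns -EBUSY.
 */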
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

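/*
 * Handle KVM_GET_ONE_REG for the SBI extension register space. For
 * KVM_REG_RISCV_SBI_MULTI_DIS the enabled-extension mask is inverted,
 * so set bits mark disabled extensions.
 */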
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

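/* Handle KVM_SET_ONE_REG for SBI state registers (currently only STA). */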
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
	default:
		return -EINVAL;
	}

	return 0;
}

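/* Handle KVM_GET_ONE_REG for SBI state registers (currently only STA). */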
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;
	int ret;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

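/*
 * Find the handler for the extension ID the guest passed in a7. Entries
 * with an index of KVM_RISCV_SBI_EXT_MAX or above (i.e. the base
 * extension, which cannot be disabled) always match; all others must be
 * enabled.
 */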
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_STATUS_ENABLED)
				return ext;

			return NULL;
		}
	}

	return NULL;
}

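/*
 * Top-level dispatcher for SBI calls made by the guest. Returns 1 to
 * continue the vCPU run loop, 0 to exit to userspace, and a negative
 * error code to fail the KVM_RUN ioctl.
 */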
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the ioctl
	 * loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}

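/*
 * Probe each known extension at vCPU init time and record whether it
 * is unavailable, enabled, or (for default_disabled extensions)
 * disabled.
 */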
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[idx] = ext->default_disabled ?
					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
					KVM_RISCV_SBI_EXT_STATUS_ENABLED;
	}
}