// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

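/* Bits 0..25 of the base ISA word, i.e. the single letter extensions 'a'-'z' */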
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

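/* e.g. KVM_ISA_EXT_ARR(SSTC) expands to [KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC */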
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
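	/*
	 * Note: the guest's Smnpm is intentionally backed by the host's
	 * Ssnpm, since pointer masking for VS-mode is provided through
	 * henvcfg, which the host gains with Ssnpm.
	 */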
	[KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM,
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

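/* Reverse lookup: map a host base ISA extension bit to a KVM ISA extension ID */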
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in Ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * The guest OS can use Svadu only when the host OS enables it.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SMNPM:
	/* There is no architectural config bit to disable Sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless the platform supports Svadu
		 * (i.e. hardware A/D bit updates).
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

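/*
 * Illustrative userspace sketch (not part of this file): reading the
 * base ISA word above with KVM_GET_ONE_REG. The "vcpu_fd" descriptor is
 * an assumption of the example (obtained via KVM_CREATE_VCPU), and
 * KVM_REG_SIZE_U64 assumes an rv64 host.
 *
 *	unsigned long isa;
 *	struct kvm_one_reg one_reg = {
 *		.id	= KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			  KVM_REG_RISCV_CONFIG |
 *			  KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr	= (unsigned long)&isa,
 *	};
 *
 *	if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		printf("base ISA: 0x%lx\n", isa);
 */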
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single-letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single-letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

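/*
 * Illustrative sketch: userspace typically points a vCPU at its entry
 * point by writing the core "pc" register (backed by sepc above). The
 * entry address below is only an example, not something KVM mandates.
 *
 *	unsigned long entry = 0x80200000UL;
 *	struct kvm_one_reg one_reg = {
 *		.id	= KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			  KVM_REG_RISCV_CORE |
 *			  KVM_REG_RISCV_CORE_REG(regs.pc),
 *		.addr	= (unsigned long)&entry,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */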
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

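/*
 * Illustrative sketch: before the first KVM_RUN, userspace can hide an
 * extension from the guest by writing 0 to its ISA_EXT single register
 * (Svpbmt is just an example of a disable-allowed extension):
 *
 *	unsigned long enable = 0;
 *	struct kvm_one_reg one_reg = {
 *		.id	= KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			  KVM_REG_RISCV_ISA_EXT |
 *			  KVM_REG_RISCV_ISA_SINGLE |
 *			  KVM_RISCV_ISA_EXT_SVPBMT,
 *		.addr	= (unsigned long)&enable,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */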
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

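/*
 * Layout note: each MULTI register packs BITS_PER_LONG extension IDs,
 * so extension "ext" lives in register KVM_REG_RISCV_ISA_MULTI_REG(ext)
 * at bit KVM_REG_RISCV_ISA_MULTI_MASK(ext). Writing a MULTI_EN register
 * requests enabling the set bits; writing MULTI_DIS requests disabling
 * them.
 */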
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

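/* The D view covers the 32 fp.d.f registers plus one extra slot for fcsr */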
static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/*
	 * vector_regs have a variable 'vlenb' size: the ONE_REG size
	 * field encodes log2 of the register's byte size
	 */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

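/*
 * Illustrative sketch: the two functions above back the KVM_GET_REG_LIST
 * ioctl. Userspace usually queries the count first (the undersized call
 * fails with E2BIG but fills in "n"), then fetches the indices:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */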
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}