// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_vcpu.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>

/* VM-wide statistics exposed through the KVM binary stats file descriptor. */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, pages),
	STATS_DESC_ICOUNTER(VM, hugepages),
};
/*
 * Layout header for the stats blob handed to userspace: the name string
 * sits right after this header, followed by the descriptor array, then
 * the actual counter data.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset =  sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
					sizeof(kvm_vm_stats_desc),
};
kvm_arch_init_vm(struct kvm * kvm,unsigned long type)27 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
28 {
29 	int i;
30 
31 	/* Allocate page table to map GPA -> RPA */
32 	kvm->arch.pgd = kvm_pgd_alloc();
33 	if (!kvm->arch.pgd)
34 		return -ENOMEM;
35 
36 	kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
37 	if (!kvm->arch.phyid_map) {
38 		free_page((unsigned long)kvm->arch.pgd);
39 		kvm->arch.pgd = NULL;
40 		return -ENOMEM;
41 	}
42 	spin_lock_init(&kvm->arch.phyid_map_lock);
43 
44 	kvm_init_vmcs(kvm);
45 
46 	/* Enable all PV features by default */
47 	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
48 	if (kvm_pvtime_supported())
49 		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
50 
51 	/*
52 	 * cpu_vabits means user address space only (a half of total).
53 	 * GPA size of VM is the same with the size of user address space.
54 	 */
55 	kvm->arch.gpa_size = BIT(cpu_vabits);
56 	kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
57 	kvm->arch.invalid_ptes[0] = 0;
58 	kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
59 #if CONFIG_PGTABLE_LEVELS > 2
60 	kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table;
61 #endif
62 #if CONFIG_PGTABLE_LEVELS > 3
63 	kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table;
64 #endif
65 	for (i = 0; i <= kvm->arch.root_level; i++)
66 		kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);
67 
68 	return 0;
69 }
kvm_arch_destroy_vm(struct kvm * kvm)71 void kvm_arch_destroy_vm(struct kvm *kvm)
72 {
73 	kvm_destroy_vcpus(kvm);
74 	free_page((unsigned long)kvm->arch.pgd);
75 	kvm->arch.pgd = NULL;
76 	kvfree(kvm->arch.phyid_map);
77 	kvm->arch.phyid_map = NULL;
78 }
kvm_vm_ioctl_check_extension(struct kvm * kvm,long ext)80 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
81 {
82 	int r;
83 
84 	switch (ext) {
85 	case KVM_CAP_IRQCHIP:
86 	case KVM_CAP_ONE_REG:
87 	case KVM_CAP_ENABLE_CAP:
88 	case KVM_CAP_READONLY_MEM:
89 	case KVM_CAP_SYNC_MMU:
90 	case KVM_CAP_IMMEDIATE_EXIT:
91 	case KVM_CAP_IOEVENTFD:
92 	case KVM_CAP_MP_STATE:
93 	case KVM_CAP_SET_GUEST_DEBUG:
94 		r = 1;
95 		break;
96 	case KVM_CAP_NR_VCPUS:
97 		r = num_online_cpus();
98 		break;
99 	case KVM_CAP_MAX_VCPUS:
100 		r = KVM_MAX_VCPUS;
101 		break;
102 	case KVM_CAP_MAX_VCPU_ID:
103 		r = KVM_MAX_VCPU_IDS;
104 		break;
105 	case KVM_CAP_NR_MEMSLOTS:
106 		r = KVM_USER_MEM_SLOTS;
107 		break;
108 	default:
109 		r = 0;
110 		break;
111 	}
112 
113 	return r;
114 }
kvm_vm_feature_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)116 static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
117 {
118 	switch (attr->attr) {
119 	case KVM_LOONGARCH_VM_FEAT_LSX:
120 		if (cpu_has_lsx)
121 			return 0;
122 		return -ENXIO;
123 	case KVM_LOONGARCH_VM_FEAT_LASX:
124 		if (cpu_has_lasx)
125 			return 0;
126 		return -ENXIO;
127 	case KVM_LOONGARCH_VM_FEAT_X86BT:
128 		if (cpu_has_lbt_x86)
129 			return 0;
130 		return -ENXIO;
131 	case KVM_LOONGARCH_VM_FEAT_ARMBT:
132 		if (cpu_has_lbt_arm)
133 			return 0;
134 		return -ENXIO;
135 	case KVM_LOONGARCH_VM_FEAT_MIPSBT:
136 		if (cpu_has_lbt_mips)
137 			return 0;
138 		return -ENXIO;
139 	case KVM_LOONGARCH_VM_FEAT_PMU:
140 		if (cpu_has_pmp)
141 			return 0;
142 		return -ENXIO;
143 	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
144 		return 0;
145 	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
146 		if (kvm_pvtime_supported())
147 			return 0;
148 		return -ENXIO;
149 	default:
150 		return -ENXIO;
151 	}
152 }
kvm_vm_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)154 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
155 {
156 	switch (attr->group) {
157 	case KVM_LOONGARCH_VM_FEAT_CTRL:
158 		return kvm_vm_feature_has_attr(kvm, attr);
159 	default:
160 		return -ENXIO;
161 	}
162 }
kvm_arch_vm_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)164 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
165 {
166 	void __user *argp = (void __user *)arg;
167 	struct kvm *kvm = filp->private_data;
168 	struct kvm_device_attr attr;
169 
170 	switch (ioctl) {
171 	case KVM_CREATE_IRQCHIP:
172 		return 0;
173 	case KVM_HAS_DEVICE_ATTR:
174 		if (copy_from_user(&attr, argp, sizeof(attr)))
175 			return -EFAULT;
176 
177 		return kvm_vm_has_attr(kvm, &attr);
178 	default:
179 		return -ENOIOCTLCMD;
180 	}
181 }
/*
 * KVM_IRQ_LINE handler: assert or deassert a guest interrupt line via the
 * in-kernel irqchip. Fails with -ENXIO if the irqchip is not fully set up.
 */
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
{
	int status;

	if (!kvm_arch_irqchip_in_kernel(kvm))
		return -ENXIO;

	status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq,
			     irq_event->level, line_status);
	/* Report the routing result back to userspace */
	irq_event->status = status;

	return 0;
}
kvm_arch_irqchip_in_kernel(struct kvm * kvm)194 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
195 {
196 	return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
197 }