// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

#define PTE_SIZE 12

static struct kmem_cache *hpte_cache;

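/*
 * Each shadow HPTE is linked into several hash maps at once, so it can
 * be looked up by guest effective address, by a wider effective-address
 * range, or by guest virtual page number. The helpers below compute the
 * bucket index for each of those maps.
 */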
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

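/*
 * Insert a freshly mapped shadow PTE into all lookup hashes. Writers
 * serialise on mmu_lock; readers walk the chains under RCU only.
 */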
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

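/*
 * Tear down a single shadow PTE: drop the host mapping, unlink the
 * entry from every hash chain and free it once all RCU readers are
 * done. Two contexts may race to invalidate the same entry, hence the
 * hlist_unhashed() re-check under mmu_lock.
 */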
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	kfree_rcu(pte, rcu_head);
}

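/*
 * Every entry is linked into exactly one vPTE_long bucket, so walking
 * all of those buckets visits each shadow PTE once; that is how the
 * full flush works. The page/long variants below only walk the single
 * bucket the address hashes to.
 */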
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

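/*
 * Flush shadow PTEs by guest effective address. The mask picks the
 * granularity: ~0xfffUL flushes the single 4k page at guest_ea,
 * 0x0ffff000 flushes all pages sharing eaddr bits 12-27, and 0 drops
 * the whole cache.
 */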
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

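/*
 * Flush shadow PTEs by guest virtual page number; the mask selects one
 * of the vPTE maps above, mirroring kvmppc_mmu_pte_flush().
 */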
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

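/*
 * Flush shadow PTEs whose guest real (physical) address falls into
 * [pa_start, pa_end). Real addresses are not hashed, so this walks all
 * vPTE_long buckets, like a full flush.
 */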
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

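/*
 * Allocate a zeroed cache entry for the next mapping. If the cache is
 * full, everything is flushed first: coarse, but it keeps the
 * bookkeeping trivial. May return NULL on allocation failure.
 */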
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

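/*
 * Per-vcpu setup: empty every hash bucket and initialise the lock that
 * serialises writers against each other.
 */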
static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

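/*
 * Global (module init/exit) lifecycle of the slab cache backing all
 * shadow PTE entries; the alignment is the object size itself.
 */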
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}