// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

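/*
 * Host HTAB location, cached from SDR1 at init time: 'htab' is the
 * kernel-virtual base of the host hashed page table and 'htabmask'
 * masks a (hash << 6) value down to a valid PTEG offset within it.
 */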
static ulong htab;
static u32 htabmask;

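/*
 * Drop a shadow PTE from the host HTAB. Clearing the first PTE word
 * clears the valid bit; the sync/tlbie/sync/tlbsync sequence then
 * flushes any stale translation from the TLB of this (UP-only, see
 * the #error above) host.
 */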
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32 *)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
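/* The hash XOR-folds the 64-bit gvsid in SID_MAP_BITS-wide chunks, so
 * every bit of the guest VSID influences the resulting table index. */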
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

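/*
 * Look up an existing gvsid->hvsid mapping. Each gvsid has two
 * candidate slots: its hash index and the mirrored index
 * (SID_MAP_MASK - hash), matching the alternation done by
 * create_sid_map() below.
 */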
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

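/*
 * Compute the host PTEG address for (vsid, eaddr). Per the 32-bit
 * hashed page table architecture, the primary hash is vsid XOR the
 * page index within the segment, and the secondary hash is its
 * complement; the << 6 scales the hash to 64-byte PTEGs (8 PTEs of
 * 8 bytes each) before masking it into the HTAB.
 */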
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}

extern char etext[];

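/*
 * Install one shadow PTE in the host HTAB for a translation the guest
 * MMU has already resolved: grab a reference to the backing host page,
 * pick a PTEG slot (evicting round-robin if both PTEGs are full),
 * write the hardware PTE and register it with the shadow-PTE cache so
 * it can be invalidated later.
 */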
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	struct page *page;
	kvm_pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;
	bool writable;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
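	/*
	 * Walk the 8 PTE slots (two u32 words each, hence rr += 2) of
	 * one PTEG looking for an invalid entry. If all 8 are valid,
	 * switch to the other PTEG and evict starting at slot 0.
	 */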
next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

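	/*
	 * Build the two PTE words: word 0 holds the abbreviated page
	 * index (top 6 bits of the 28-bit segment offset), the 24-bit
	 * VSID, the secondary-hash flag and the valid bit; word 1 holds
	 * the host physical page number plus the WIMG, R/C and PP
	 * protection bits.
	 */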
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

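	/*
	 * Update the PTE with interrupts off: first invalidate any old
	 * entry (and sync, so the hardware never walks a half-written
	 * PTE), then store the second word before the valid-bit-bearing
	 * first word.
	 */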
	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_page_unused(page);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_page_clean(page);
out:
	return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}
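
/*
 * Allocate a new gvsid->hvsid mapping: pick one of the two candidate
 * hash slots, hand out the next host VSID from the per-vcpu pool, and
 * flush everything once the pool runs dry.
 */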
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Two guest VSIDs that collide on the hash would otherwise keep
	 * evicting each other from the same slot, so alternate between
	 * the forward and the mirrored slot on every allocation.
	 */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

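/*
 * Map one shadow segment register for 'eaddr': translate the guest
 * ESID to a guest VSID, find or create the corresponding host VSID,
 * and program the resulting value (host_vsid | SR_KP) into the shadow
 * vcpu's segment register array.
 */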
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember context id for this combination */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the HTAB is */
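	/*
	 * On 32-bit, SDR1 holds HTABORG (the physical HTAB base) in its
	 * upper 16 bits and HTABMASK in its low 9 bits. htabmask covers
	 * every bit a (hash << 6) PTEG offset can occupy: 0xFFC0 for the
	 * always-used low hash bits, plus HTABMASK for the upper ones.
	 */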
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}