/*
 * COPYRIGHT (C) 2013-2014, Real-Thread Information Technology Ltd
 * All rights reserved
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-11-04     Grissiom     add comment
 */

#include <rthw.h>
#include <rtthread.h>
#include <interrupt.h>

#include <log_trace.h>
#include <vmm.h>

#include "vmm_context.h"

struct rt_vmm_share_layout rt_vmm_share SECTION(".vmm.share");

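/* The VMM context, shared with the guest. The backing memory is provided by
 * the caller of vmm_context_init(). */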
volatile struct vmm_context *_vmm_context = RT_NULL;

void vmm_context_init(void *context_addr)
{
    _vmm_context = (struct vmm_context *)context_addr;
    rt_memset((void *)_vmm_context, 0x00, sizeof(struct vmm_context));
    /* When loading RT-Thread, the IRQ on the guest should be disabled. */
    _vmm_context->virq_status = 1;
}

#ifdef RT_VMM_USING_DOMAIN
unsigned long guest_domain_val SECTION(".bss.share");
unsigned long vmm_domain_val SECTION(".bss.share");
/* Some RT-Thread code needs to be called in the guest
 * context (rt_thread_idle_excute for example). To simplify the code, we need
 * a "super" domain mode that has access to both sides. The code executed in
 * super domain mode is restricted and should be harmless. */
unsigned long super_domain_val SECTION(".bss.share");
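
/* guest_domain_val, vmm_domain_val and super_domain_val are images of the
 * ARM Domain Access Control Register (DACR, CP15 c3): two bits per domain,
 * where 0b01 ("client") makes the MMU check the page-table permission bits
 * and 0b00 denies all access to pages in that domain. */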
void vmm_context_init_domain(struct vmm_domain *domain)
{
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (guest_domain_val));

    rt_kprintf("Linux domain: kernel: %d, user: %d, io: %d\n"
               "VMM domain: vmm: %d, share: %d\n",
               domain->kernel, domain->user, domain->io,
               domain->vmm, domain->vmm_share);

    if (domain->kernel == domain->vmm ||
        domain->io     == domain->vmm)
    {
        rt_kprintf("VMM and the guest share the same domain\n");
        super_domain_val = vmm_domain_val = guest_domain_val;
        return;
    }

    vmm_domain_val = guest_domain_val;

    /* become a client of our own territory */
    vmm_domain_val |= (1 << (domain->vmm * 2)) | (1 << (domain->vmm_share * 2));

    super_domain_val = vmm_domain_val;
    /* the super domain has access to both sides */
    super_domain_val |= (1 << (domain->kernel * 2)) | (1 << (domain->user * 2));

    rt_kprintf("Original DAC: 0x%08x\n", guest_domain_val);
}

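/* Switch the DACR to domain_val and return the old value so the caller can
 * restore it later. Typical usage, as in vmm_show_guest_reg() below:
 *
 *     old_domain = vmm_context_enter_domain(super_domain_val);
 *     ... access memory on both sides ...
 *     vmm_context_restore_domain(old_domain);
 */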
unsigned long vmm_context_enter_domain(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");

    return old_domain;
}

void vmm_context_restore_domain(unsigned long domain_val)
{
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");
}
#endif

void vmm_virq_pending(int irq)
{
    /* When running this piece of code, the guest is already suspended, so it
     * is safe to set the bits without locks. */
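    /* virq_pending is a bitmap with 32 IRQs per word: word irq/32, bit
     * irq%32. virq_pended is the summary flag telling the guest that at
     * least one virtual IRQ is waiting. */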
    _vmm_context->virq_pending[irq / 32] |= (1 << (irq % 32));
    _vmm_context->virq_pended = 1;
    /* mask this IRQ in the host */
    rt_hw_interrupt_mask(irq);
}

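/* If the guest has virtual IRQs enabled and at least one is pending, trigger
 * the interrupt used to inject them into the guest. */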
void vmm_virq_update(void)
{
    if ((!_vmm_context->virq_status) &&
        ( _vmm_context->virq_pended))
    {
        rt_hw_interrupt_trigger(RT_VMM_VIRQ_TRIGGER);
    }
}

/** check the guest IRQ status
 *
 * @return 0 if the guest should handle an IRQ, -1 if the guest context
 * should be restored normally.
 */
int vmm_virq_check(void)
{
    if ((!_vmm_context->virq_status) &&
        ( _vmm_context->virq_pended))
    {
        return 0;
    }

    return -1;
}

/* 10 = len("xxxxxxxx, "), the formatted width of one "%08x, " item */
static char _vmbuf[10*ARRAY_SIZE(_vmm_context->virq_pending)];
void vmm_dump_virq(void)
{
    int i, s;

    vmm_info("---- virtual IRQ ----\n");
    vmm_info("  status: %08x,   pended: %08x, pending:\n",
               _vmm_context->virq_status, _vmm_context->virq_pended);
    for (s = 0, i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
    {
        s += rt_snprintf(_vmbuf+s, sizeof(_vmbuf)-s,
                         "%08x, ", _vmm_context->virq_pending[i]);
    }
    /* "%.*s" takes an int precision, so cast the size_t down */
    vmm_info("%.*s\n", (int)sizeof(_vmbuf), _vmbuf);
    vmm_info("---- virtual IRQ ----\n");
}

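/* Check the invariant that virq_pended is set exactly when at least one bit
 * in the virq_pending bitmap is set. Returns 1 when they agree, 0 otherwise. */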
int vmm_virq_coherence_ok(void)
{
    int i, res;
    int should_pend = 0;

    for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
    {
        should_pend |= _vmm_context->virq_pending[i];
    }

    res = (_vmm_context->virq_pended == !!should_pend);

    if (!res)
    {
        vmm_info("--- %x %x, %x\n",
                 _vmm_context->virq_pended, should_pend, !!should_pend);
    }

    return res;
}

extern struct rt_thread vmm_thread;

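/* Dump the registers saved on the guest thread's stack, running in the super
 * domain (when RT_VMM_USING_DOMAIN is enabled) so both sides are readable. */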
void vmm_show_guest_reg(void)
{
    struct rt_hw_stack *sp = vmm_thread.sp;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

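    /* sp+1 is where the guest stack pointer will point once this saved
     * frame has been popped */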
    vmm_info("CPSR: %08x, PC: %08x, LR: %08x, SP: %08x\n",
             sp->cpsr, sp->pc, sp->lr, sp+1);

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}

void vmm_dump_domain(void)
{
    unsigned long dac;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (dac));
    vmm_info("current DAC: %08x\n", dac);
#ifdef RT_VMM_USING_DOMAIN
    vmm_info("guest DAC: %08x, RTT DAC: %08x, super DAC: %08x\n",
             guest_domain_val, vmm_domain_val, super_domain_val);
#endif
}

void vmm_show_guest(void)
{
    vmm_show_guest_reg();
    vmm_dump_virq();
    vmm_dump_domain();
}

#ifdef RT_USING_FINSH
#include <finsh.h>
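/* With the classic finsh shell this can be invoked as vmm() */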
FINSH_FUNCTION_EXPORT_ALIAS(vmm_show_guest, vmm, show vmm status);
#endif

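/* Return nonzero if the mode field of the CPSR is not a valid ARM processor
 * mode. Note that MODEMASK (0x1f) itself is System mode. */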
static int _bad_cpsr(unsigned long cpsr)
{
    int bad = 1;

    switch (cpsr & MODEMASK)
    {
    case USERMODE:
    case FIQMODE:
    case IRQMODE:
    case SVCMODE:
#ifdef CPU_HAS_MONITOR_MODE
    case MONITORMODE:
#endif
    case ABORTMODE:
#ifdef CPU_HAS_HYP_MODE
    case HYPMODE:
#endif
    case UNDEFMODE:
    case MODEMASK:
        bad = 0;
        break;
    }
    return bad;
}

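/* Sanity-check the saved guest context before it is resumed. Each check only
 * prints a warning and dumps the VMM state; none of them stops the guest. */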
void vmm_verify_guest_status(struct rt_hw_stack *sp)
{
    int dump_vmm = 0;
    unsigned long cpsr;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    cpsr = sp->cpsr;
    if (_bad_cpsr(cpsr))
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad CPSR in guest\n");
        dump_vmm = 1;
    }
    else
    {
        /* this check is intentionally disabled by the "&& 0" */
        if (cpsr & A_Bit && 0)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: A bit is set in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN))
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: IRQ disabled in guest\n");
            dump_vmm = 1;
        }
        if (cpsr & F_Bit)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: FIQ disabled in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & MODEMASK) == USERMODE)
        {
            if (_vmm_context->virq_status & 1)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ disabled in user mode\n");
                dump_vmm = 1;
            }
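            /* PCs in (0xbf000000, 0xffff0000) are treated as guest kernel
             * space here */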
            if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000))
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: executing kernel code in usr mode\n");
                dump_vmm = 1;
            }
            /* FIXME: when the guest is suspended in user mode and its
             * interrupts come, this can be misleading. */
#if 0
            if (_vmm_context->virq_pended)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ pended in user mode\n");
                dump_vmm = 1;
            }
#endif
        }
        else if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: executing usr code in svc mode\n");
            dump_vmm = 1;
        }
    }

#if 0
    if (!vmm_virq_coherence_ok())
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad VIRQ status\n");
        dump_vmm = 1;
    }
#endif

    if (dump_vmm)
    {
        vmm_show_guest();
        vmm_info("=================================\n");
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}
