xref: /nrf52832-nimble/rt-thread/components/vmm/vmm_context.c (revision 104654410c56c573564690304ae786df310c91fc)
1 /*
2  * COPYRIGHT (C) 2013-2014, Real-Thread Information Technology Ltd
3  * All rights reserved
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  *
7  * Change Logs:
8  * Date           Author       Notes
9  * 2013-11-04     Grissiom     add comment
10  */
11 
12 #include <rthw.h>
13 #include <rtthread.h>
14 #include <interrupt.h>
15 
16 #include <log_trace.h>
17 #include <vmm.h>
18 
19 #include "vmm_context.h"
20 
/* Shared layout placed in the dedicated ".vmm.share" linker section —
 * presumably mapped by both the VMM and the guest (the section name
 * suggests so; confirm against the linker script). */
struct rt_vmm_share_layout rt_vmm_share SECTION(".vmm.share");

/* Guest-provided context block; set once by vmm_context_init().
 * volatile: the guest side updates fields behind the VMM's back. */
volatile struct vmm_context *_vmm_context = RT_NULL;
24 
vmm_context_init(void * context_addr)25 void vmm_context_init(void *context_addr)
26 {
27     _vmm_context = (struct vmm_context *)context_addr;
28     rt_memset((void *)_vmm_context, 0x00, sizeof(struct vmm_context));
29     /* When loading RT-Thread, the IRQ on the guest should be disabled. */
30     _vmm_context->virq_status = 1;
31 }
32 
#ifdef RT_VMM_USING_DOMAIN
/* DACR value captured from the guest at init time. */
unsigned long guest_domain_val SECTION(".bss.share");
/* DACR value the VMM itself runs with. */
unsigned long vmm_domain_val SECTION(".bss.share");
/* some RT-Thread code need to be called in the guest
 * context(rt_thread_idle_excute for example). To simplify the code, we need a
 * "super" domain mode to have access of both side. The code executed in super
 * domain mode is restricted and should be harmless. */
unsigned long super_domain_val SECTION(".bss.share");
/* Derive the VMM and "super" DACR values from the guest's current DACR.
 *
 * @param domain describes which ARM MMU domain numbers the guest
 *        kernel/user/io and the VMM (plus its shared area) occupy.
 *
 * Must run while the guest's DACR is live in CP15 c3 (it is read below).
 */
void vmm_context_init_domain(struct vmm_domain *domain)
{
    /* Snapshot the guest's Domain Access Control Register (CP15 c3). */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (guest_domain_val));

    rt_kprintf("Linux domain: kernel: %d, user: %d, io: %d\n"
               "VMM domain: vmm: %d, share: %d\n",
               domain->kernel, domain->user, domain->io,
               domain->vmm, domain->vmm_share);

    if (domain->kernel == domain->vmm ||
        domain->io     == domain->vmm)
    {
        /* No isolation possible: use one DACR value for all three roles. */
        rt_kprintf("VMM and the guest share the same domain\n");
        super_domain_val = vmm_domain_val = guest_domain_val;
        return;
    }

    vmm_domain_val = guest_domain_val;

    /* become client to our own territory */
    /* Each domain occupies 2 bits in the DACR; 0b01 = client access. */
    vmm_domain_val |= (1 << (domain->vmm * 2)) | (1 << (domain->vmm_share * 2));

    super_domain_val = vmm_domain_val;
    /* super domain has access to both side */
    super_domain_val |= (1 << (domain->kernel * 2)) | (1 << (domain->user * 2));

    rt_kprintf("Original DAC: 0x%08x\n", guest_domain_val);
}
69 
/* Switch the DACR (CP15 c3) to domain_val and return the previous value
 * so the caller can undo the switch with vmm_context_restore_domain(). */
unsigned long vmm_context_enter_domain(unsigned long domain_val)
{
    unsigned long old_domain;

    /* Read current DACR first, then install the new one. The "memory"
     * clobber keeps memory accesses from being reordered across the
     * permission change. */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}
79 
/* Write domain_val back into the DACR (CP15 c3); counterpart of
 * vmm_context_enter_domain(). */
void vmm_context_restore_domain(unsigned long domain_val)
{
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
}
#endif
85 
vmm_virq_pending(int irq)86 void vmm_virq_pending(int irq)
87 {
88     /* when running this piece of code, the guest is already suspended. So it's
89      * safe to set the bits without locks. */
90     _vmm_context->virq_pending[irq / 32] |= (1 << (irq % 32));
91     _vmm_context->virq_pended = 1;
92     /* mask this IRQ in host */
93     rt_hw_interrupt_mask(irq);
94 }
95 
vmm_virq_update(void)96 void vmm_virq_update(void)
97 {
98     if ((!_vmm_context->virq_status) &&
99         ( _vmm_context->virq_pended))
100     {
101         rt_hw_interrupt_trigger(RT_VMM_VIRQ_TRIGGER);
102     }
103 }
104 
105 /** check the guest IRQ status
106  *
107  * @return 0 on guest should handle IRQ, -1 on should restore the guest context
108  * normally.
109  */
vmm_virq_check(void)110 int vmm_virq_check(void)
111 {
112     if ((!_vmm_context->virq_status) &&
113         ( _vmm_context->virq_pended))
114     {
115         return 0;
116     }
117 
118     return -1;
119 }
120 
121 /* 10 = len("%08x, ") */
122 static char _vmbuf[10*ARRAY_SIZE(_vmm_context->virq_pending)];
vmm_dump_virq(void)123 void vmm_dump_virq(void)
124 {
125     int i, s;
126 
127     vmm_info("---- virtual IRQ ----\n");
128     vmm_info("  status: %08x,   pended: %08x, pending:\n",
129                _vmm_context->virq_status, _vmm_context->virq_pended);
130     for (s = 0, i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
131     {
132         s += rt_snprintf(_vmbuf+s, sizeof(_vmbuf)-s,
133                          "%08x, ", _vmm_context->virq_pending[i]);
134     }
135     vmm_info("%.*s\n", sizeof(_vmbuf), _vmbuf);
136     vmm_info("---- virtual IRQ ----\n");
137 }
138 
vmm_virq_coherence_ok(void)139 int vmm_virq_coherence_ok(void)
140 {
141     int i, res;
142     int should_pend = 0;
143 
144     for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
145     {
146         should_pend |= _vmm_context->virq_pending[i];
147     }
148 
149     res = (_vmm_context->virq_pended == !!should_pend);
150 
151     if (!res)
152     {
153         vmm_info("--- %x %x, %x\n",
154                  _vmm_context->virq_pended, should_pend, !!should_pend);
155     }
156 
157     return res;
158 }
159 
160 extern struct rt_thread vmm_thread;
161 
/* Print the suspended guest's saved CPSR/PC/LR and its stack pointer.
 *
 * NOTE(review): sp+1 (address just past the saved frame) is printed as
 * the guest SP with %08x — assumes 32-bit pointers; confirm against
 * struct rt_hw_stack's layout. */
void vmm_show_guest_reg(void)
{
    struct rt_hw_stack *sp = vmm_thread.sp;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    /* The saved frame lives on the guest side; enter the super domain
     * so we are allowed to read it. */
    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    vmm_info("CPSR: %08x, PC: %08x, LR: %08x, SP: %08x\n",
             sp->cpsr, sp->pc, sp->lr, sp+1);

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}
178 
/* Print the live DACR and, when domains are in use, the three cached
 * DACR values (guest / RT-Thread / super). */
void vmm_dump_domain(void)
{
    unsigned long dac;

    /* Read the current Domain Access Control Register (CP15 c3). */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (dac));
    vmm_info("current DAC: %08x\n", dac);
#ifdef RT_VMM_USING_DOMAIN
    vmm_info("guest DAC: %08x, RTT DAC: %08x, super DAC: %08x\n",
             guest_domain_val, vmm_domain_val, super_domain_val);
#endif
}
190 
/* Dump everything known about the guest: saved registers, virtual IRQ
 * state and MMU domain configuration. */
void vmm_show_guest(void)
{
    vmm_show_guest_reg();
    vmm_dump_virq();
    vmm_dump_domain();
}
197 
198 #ifdef RT_USING_FINSH
199 #include <finsh.h>
200 FINSH_FUNCTION_EXPORT_ALIAS(vmm_show_guest, vmm, show vmm status);
201 #endif
202 
_bad_cpsr(unsigned long cpsr)203 static int _bad_cpsr(unsigned long cpsr)
204 {
205     int bad = 1;
206 
207     switch (cpsr & MODEMASK)
208     {
209     case USERMODE:
210     case FIQMODE:
211     case IRQMODE:
212     case SVCMODE:
213 #ifdef CPU_HAS_MONITOR_MODE
214     case MONITORMODE:
215 #endif
216     case ABORTMODE:
217 #ifdef CPU_HAS_HYP_MODE
218     case HYPMODE:
219 #endif
220     case UNDEFMODE:
221     case MODEMASK:
222         bad = 0;
223         break;
224     };
225     return bad;
226 }
227 
/* Sanity-check the guest's saved state before it is resumed.
 *
 * @param sp the guest's saved register frame.
 *
 * Logs a warning and dumps the full VMM state when the frame looks
 * inconsistent (bad mode bits, IRQ/FIQ masked where they should not be,
 * PC in an implausible range for the current mode). Purely diagnostic:
 * the guest is resumed regardless of the outcome. */
void vmm_verify_guest_status(struct rt_hw_stack *sp)
{
    int dump_vmm = 0;
    unsigned long cpsr;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    /* The frame lives on the guest side; enter the super domain to read it. */
    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    cpsr = sp->cpsr;
    if (_bad_cpsr(cpsr))
    {
            vmm_info("=================================\n");
            vmm_info("VMM WARING: bad CPSR in guest\n");
            dump_vmm = 1;
    }
    else
    {
        /* "&& 0" deliberately disables the A-bit check; kept for reference. */
        if (cpsr & A_Bit && 0)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARING: A bit is set in guest\n");
            dump_vmm = 1;
        }
        /* IRQs masked while executing below VMM_BEGIN (guest code) is
         * suspicious: the VMM virtualizes guest IRQ masking. */
        if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN))
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARING: IRQ disabled in guest\n");
            dump_vmm = 1;
        }
        if (cpsr & F_Bit)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARING: FIQ disabled in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & MODEMASK) == USERMODE)
        {
            /* User mode must never run with virtual IRQs disabled. */
            if (_vmm_context->virq_status & 1)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARING: VIRQ disabled in user mode\n");
                dump_vmm = 1;
            }
            /* 0xbf000000..0xffff0000 — presumably the guest kernel's
             * address range on this platform; TODO confirm. */
            if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000))
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARING: executing kernel code in usr mode\n");
                dump_vmm = 1;
            }
            /* FIXME: when the guest is suspended in user mode and its
             * interrupts come, this can be misleading. */
#if 0
            if (_vmm_context->virq_pended)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARING: VIRQ pended in user mode\n");
                dump_vmm = 1;
            }
#endif
        }
        else if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARING: executing usr code in svc mode\n");
            dump_vmm = 1;
        }
    }

#if 0
    if (!vmm_virq_coherence_ok())
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARING: bad VIRQ status\n");
        dump_vmm = 1;
    }
#endif

    if (dump_vmm)
    {
        vmm_show_guest();
        vmm_info("=================================\n");
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}
317 
318