From d001bd8483c805c45a42d9bd0468a96722e72875 Mon Sep 17 00:00:00 2001
From: Grissiom <[email protected]>
Date: Thu, 1 Aug 2013 14:59:56 +0800
Subject: [PATCH 1/2] RTT-VMM: implement dual system running on realview-pb-a8

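Run Linux as the guest OS of RT-Thread VMM on the realview-pb-a8 board.
In this dual-system setup the two kernels share one physical machine:

- IRQ masking is paravirtualized. When the VMM is active,
  arch_local_irq_*() and the disable_irq/enable_irq assembler macros no
  longer execute cpsid/cpsie but operate on a software IRQ flag
  (virq_status) in the shared struct vmm_context.
- gic_handle_irq() dispatches the virtual IRQs that the host marks
  pending in vmm_context instead of reading GIC_CPU_INTACK directly.
- The top of RAM (HOST_VMM_SIZE bytes ending at HOST_VMM_ADDR_END) is
  reserved for the VMM and mapped with the new MT_RTVMM/MT_RTVMM_SHARE
  memory types in their own protection domains.
- The exception entry/exit paths (SVC, IRQ, SWI, return to user) are
  hooked so that the virtual IRQ flag stays consistent with the
  emulated SPSR.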
Signed-off-by: Grissiom <[email protected]>
Signed-off-by: Bernard.Xiong <[email protected]>
---
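Reviewer note: the virtual IRQ flag is the core of this patch. Below is a
minimal, self-contained user-space model of the state machine implemented
in arch/arm/vmm/vmm_virq.c (a sketch only: names mirror the patch, the
hardware masking done by vmm_irq_save() is dropped, and raising the
RTT_VMM_IRQ_TRIGGER softirq is replaced by a printout):

	/* model of the paravirtualized IRQ flag; compiles with any C compiler */
	#include <stdio.h>

	static struct {
		unsigned long virq_status;	/* 1 = guest IRQs masked */
		unsigned long virq_pended;	/* host marked an IRQ pending */
	} ctx;

	static void raise_on_pended(void)
	{
		if (ctx.virq_pended)
			printf("raise softirq RTT_VMM_IRQ_TRIGGER\n");
	}

	/* vmm_save_virq(): mask guest IRQs, return the old flag */
	static unsigned long save_virq(void)
	{
		unsigned long old = ctx.virq_status;

		ctx.virq_status = 1;
		return old;
	}

	/* vmm_restore_virq(): unmasking delivers anything that pended */
	static void restore_virq(unsigned long flags)
	{
		ctx.virq_status = flags;
		if (flags == 0)
			raise_on_pended();
	}

	int main(void)
	{
		unsigned long flags = save_virq();	/* local_irq_save() */

		ctx.virq_pended = 1;	/* an IRQ arrives while masked */
		restore_virq(flags);	/* local_irq_restore(): fires now */
		return 0;
	}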
 arch/arm/Kconfig                   |   1 +
 arch/arm/Makefile                  |   1 +
 arch/arm/common/gic.c              |  67 +++++++++++++-
 arch/arm/include/asm/assembler.h   |   8 +-
 arch/arm/include/asm/domain.h      |   7 ++
 arch/arm/include/asm/irqflags.h    |  84 ++++++++++++-----
 arch/arm/include/asm/mach/map.h    |   5 +
 arch/arm/include/vmm/vmm.h         |  35 +++++++
 arch/arm/include/vmm/vmm_config.h  |   7 ++
 arch/arm/kernel/entry-armv.S       |  30 +++++-
 arch/arm/kernel/entry-common.S     |   3 +
 arch/arm/kernel/entry-header.S     |  15 ++-
 arch/arm/mach-omap2/irq.c          |  12 +++
 arch/arm/mm/fault.c                |   9 ++
 arch/arm/mm/init.c                 |   8 ++
 arch/arm/mm/mmu.c                  |  44 +++++++++
 arch/arm/vmm/Kconfig               |  49 ++++++++++
 arch/arm/vmm/Makefile              |  10 ++
 arch/arm/vmm/README                |   1 +
 arch/arm/vmm/am33xx/intc.h         |  13 +++
 arch/arm/vmm/am33xx/softirq.c      |  14 +++
 arch/arm/vmm/am33xx/virq.c         |  48 ++++++++++
 arch/arm/vmm/realview_a8/softirq.c |  12 +++
 arch/arm/vmm/vmm.c                 |  32 +++++++
 arch/arm/vmm/vmm_traps.c           |  37 ++++++++
 arch/arm/vmm/vmm_virhw.h           |  59 ++++++++++++
 arch/arm/vmm/vmm_virq.c            | 183 +++++++++++++++++++++++++++++++++++++
 27 files changed, 767 insertions(+), 27 deletions(-)
 create mode 100644 arch/arm/include/vmm/vmm.h
 create mode 100644 arch/arm/include/vmm/vmm_config.h
 create mode 100644 arch/arm/vmm/Kconfig
 create mode 100644 arch/arm/vmm/Makefile
 create mode 100644 arch/arm/vmm/README
 create mode 100644 arch/arm/vmm/am33xx/intc.h
 create mode 100644 arch/arm/vmm/am33xx/softirq.c
 create mode 100644 arch/arm/vmm/am33xx/virq.c
 create mode 100644 arch/arm/vmm/realview_a8/softirq.c
 create mode 100644 arch/arm/vmm/vmm.c
 create mode 100644 arch/arm/vmm/vmm_traps.c
 create mode 100644 arch/arm/vmm/vmm_virhw.h
 create mode 100644 arch/arm/vmm/vmm_virq.c

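Reviewer note: the host-side bring-up is not part of this patch. A
hypothetical sketch of how platform code could wire up the interfaces
exported here (vmm_context_init(), vmm_set_status() and struct
vmm_context are real; placing the context at the start of the shared
region is an assumption about the RT-Thread side):

	#include <linux/init.h>
	#include <vmm/vmm.h>		/* vmm_context_init(), HOST_VMM_ADDR_BEGIN */
	#include "../vmm/vmm_virhw.h"	/* struct vmm_context */

	static int __init vmm_guest_init(void)
	{
		/* assumed: the host placed a zeroed struct vmm_context at
		 * the beginning of the shared memory region */
		struct vmm_context *ctx = (struct vmm_context *)HOST_VMM_ADDR_BEGIN;

		vmm_context_init(ctx);	/* publish the context to vmm.c */
		vmm_set_status(1);	/* IRQ masking is virtualized from here on */
		return 0;
	}
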
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b8..eb82cd6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1164,6 +1164,7 @@ config ARM_TIMER_SP804
 	select HAVE_SCHED_CLOCK

 source arch/arm/mm/Kconfig
+source arch/arm/vmm/Kconfig

 config ARM_NR_BANKS
 	int
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c..262c8e2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE)	+= arch/arm/nwfpe/
 core-$(CONFIG_FPE_FASTFPE)	+= $(FASTFPE_OBJ)
 core-$(CONFIG_VFP)		+= arch/arm/vfp/
 core-$(CONFIG_XEN)		+= arch/arm/xen/
+core-$(CONFIG_ARM_VMM)		+= arch/arm/vmm/

 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 87dfa90..a9d7357 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -45,6 +45,11 @@
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>

+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#include "../vmm/vmm_virhw.h"
+#endif
+
 union gic_base {
 	void __iomem *common_base;
 	void __percpu __iomem **percpu_base;
@@ -276,12 +281,72 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
 #define gic_set_wake	NULL
 #endif

+#ifdef CONFIG_ARM_VMM
+void vmm_irq_handle(struct gic_chip_data *gic, struct pt_regs *regs)
+{
+	unsigned long flags;
+	struct vmm_context* _vmm_context;
+
+	_vmm_context = vmm_context_get();
+
+	while (_vmm_context->virq_pended) {
+		int index;
+
+		flags = vmm_irq_save();
+		_vmm_context->virq_pended = 0;
+		vmm_irq_restore(flags);
+
+		/* get the pending interrupts */
+		for (index = 0; index < IRQS_NR_32; index++) {
+			int pdbit;
+
+			for (pdbit = __builtin_ffs(_vmm_context->virq_pending[index]);
+			     pdbit != 0;
+			     pdbit = __builtin_ffs(_vmm_context->virq_pending[index])) {
+				unsigned long inner_flag;
+				int irqnr, oirqnr;
+
+				pdbit--;
+
+				inner_flag = vmm_irq_save();
+				_vmm_context->virq_pending[index] &= ~(1 << pdbit);
+				vmm_irq_restore(inner_flag);
+
+				oirqnr = pdbit + index * 32;
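+				/* IDs 0-15 are SGIs; 1021-1023 are special GIC IDs */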
+				if (likely(oirqnr > 15 && oirqnr < 1021)) {
+					irqnr = irq_find_mapping(gic->domain, oirqnr);
+					handle_IRQ(irqnr, regs);
+				} else if (oirqnr < 16) {
+					/* soft IRQs are EOIed by the host. */
+#ifdef CONFIG_SMP
+					handle_IPI(oirqnr, regs);
+#endif
+				}
+				/* unmask interrupt */
+				/* FIXME: maybe we don't need this */
+				writel_relaxed(1 << (oirqnr % 32),
+					       gic_data_dist_base(gic)
+					       + GIC_DIST_ENABLE_SET
+					       + (oirqnr / 32) * 4);
+
+			}
+		}
+	}
+}
+#endif
+
 asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
 	u32 irqstat, irqnr;
 	struct gic_chip_data *gic = &gic_data[0];
 	void __iomem *cpu_base = gic_data_cpu_base(gic);

+#ifdef CONFIG_ARM_VMM
+	if (vmm_get_status()) {
+		vmm_irq_handle(gic, regs);
+		return;
+	}
+#endif
 	do {
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
 		irqnr = irqstat & ~0x1c00;
@@ -777,7 +842,7 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
 	gic_cpu_init(&gic_data[gic_nr]);
 }

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_ARM_VMM)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200..b646fa7 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -82,11 +82,15 @@
  */
 #if __LINUX_ARM_ARCH__ >= 6
 	.macro	disable_irq_notrace
-	cpsid	i
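+	@ under VMM, masking must flip the virtual IRQ flag, so call into C;
+	@ preserve the caller-clobbered registers around the call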
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	irq_disable_asm
+	ldmia	sp!, {r0-r3, ip, lr}
 	.endm

 	.macro	enable_irq_notrace
-	cpsie	i
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	irq_enable_asm
+	ldmia	sp!, {r0-r3, ip, lr}
 	.endm
 #else
 	.macro	disable_irq_notrace
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..bbc4470 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -44,6 +44,13 @@
 #define DOMAIN_IO	0
 #endif

+#ifdef CONFIG_ARM_VMM
+/* RT-Thread VMM memory space */
+#define DOMAIN_RTVMM      3
+/* memory shared between the VMM and Linux */
+#define DOMAIN_RTVMM_SHR  4
+#endif
+
 /*
  * Domain types
  */
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca5..bfaedff 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -9,34 +9,56 @@
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
+#include <vmm/vmm.h> /* the VMM only supports ARMv7 right now */

 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;

-	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
-		"	cpsid	i"
-		: "=r" (flags) : : "memory", "cc");
+	if (vmm_status)
+	{
+		flags = vmm_save_virq();
+	}
+	else
+	{
+		asm volatile(
+			"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+			"	cpsid	i"
+			: "=r" (flags) : : "memory", "cc");
+	}
 	return flags;
 }

 static inline void arch_local_irq_enable(void)
 {
-	asm volatile(
-		"	cpsie i			@ arch_local_irq_enable"
-		:
-		:
-		: "memory", "cc");
+	if (vmm_status)
+	{
+		vmm_enable_virq();
+	}
+	else
+	{
+		asm volatile(
+			"	cpsie i			@ arch_local_irq_enable"
+			:
+			:
+			: "memory", "cc");
+	}
 }

 static inline void arch_local_irq_disable(void)
 {
-	asm volatile(
-		"	cpsid i			@ arch_local_irq_disable"
-		:
-		:
-		: "memory", "cc");
+	if (vmm_status)
+	{
+		vmm_disable_virq();
+	}
+	else
+	{
+		asm volatile(
+			"	cpsid i			@ arch_local_irq_disable"
+			:
+			:
+			: "memory", "cc");
+	}
 }

 #define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
@@ -128,9 +150,17 @@ static inline void arch_local_irq_disable(void)
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile(
-		"	mrs	%0, cpsr	@ local_save_flags"
-		: "=r" (flags) : : "memory", "cc");
+
+	if (vmm_status)
+	{
+		flags = vmm_return_virq();
+	}
+	else
+	{
+		asm volatile(
+			"	mrs	%0, cpsr	@ local_save_flags"
+			: "=r" (flags) : : "memory", "cc");
+	}
 	return flags;
 }

@@ -139,15 +169,25 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(
-		"	msr	cpsr_c, %0	@ local_irq_restore"
-		:
-		: "r" (flags)
-		: "memory", "cc");
+	if (vmm_status)
+	{
+		vmm_restore_virq(flags);
+	}
+	else
+	{
+		asm volatile(
+			"	msr	cpsr_c, %0	@ local_irq_restore"
+			:
+			: "r" (flags)
+			: "memory", "cc");
+	}
 }

 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
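+	/* under VMM, flags holds virq_status: 0x01 means vIRQs are masked */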
+	if (vmm_status)
+		return (flags == 0x01);
+
 	return flags & PSR_I_BIT;
 }

diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..502b341 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -35,6 +35,11 @@ struct map_desc {
 #define MT_MEMORY_SO		14
 #define MT_MEMORY_DMA_READY	15

+#ifdef CONFIG_ARM_VMM
+#define MT_RTVMM                16
+#define MT_RTVMM_SHARE          17
+#endif
+
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
 extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
diff --git a/arch/arm/include/vmm/vmm.h b/arch/arm/include/vmm/vmm.h
new file mode 100644
index 0000000..3ff3f31
--- /dev/null
+++ b/arch/arm/include/vmm/vmm.h
@@ -0,0 +1,35 @@
+#ifndef __LINUX_VMM_H__
+#define __LINUX_VMM_H__
+
+#include <linux/compiler.h>
+
+#include "vmm_config.h"
+
+struct irq_domain;
+struct pt_regs;
+
+extern int vmm_status;
+extern struct vmm_context *_vmm_context;
+
+/* VMM context routines */
+void vmm_context_init(void* context);
+struct vmm_context* vmm_context_get(void);
+
+void vmm_set_status(int status);
+int vmm_get_status(void);
+
+void vmm_mem_init(void);
+void vmm_raise_softirq(int irq);
+
+/* VMM vIRQ routines */
+unsigned long vmm_save_virq(void);
+unsigned long vmm_return_virq(void);
+
+void vmm_restore_virq(unsigned long flags);
+void vmm_enable_virq(void);
+void vmm_disable_virq(void);
+void vmm_enter_hw_noirq(void);
+
+void vmm_raise_softirq(int irq);
+
+#endif
diff --git a/arch/arm/include/vmm/vmm_config.h b/arch/arm/include/vmm/vmm_config.h
new file mode 100644
index 0000000..cce5e8a
--- /dev/null
+++ b/arch/arm/include/vmm/vmm_config.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_VMM_CONFIG_H__
+#define __LINUX_VMM_CONFIG_H__
+
+#define HOST_VMM_ADDR_END	CONFIG_HOST_VMM_ADDR_END
+#define HOST_VMM_ADDR_BEGIN	(CONFIG_HOST_VMM_ADDR_END - CONFIG_HOST_VMM_SIZE)
+
+#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098..80f1681 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -182,6 +182,15 @@ ENDPROC(__und_invalid)
 	@
 	stmia	r7, {r2 - r6}

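+	@ under VMM: fold the virtual IRQ mask into the SPSR saved on the
+	@ stack, then trade "hardware IRQs off" for "virtual IRQs off"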
+	stmdb   sp!, {r0-r3, ip, lr}
+	mov     r0, r5
+	add     r1, sp, #4*6
+	bl      vmm_save_virq_spsr_asm
+	mov     r5, r0
+	bl      vmm_switch_nohwirq_to_novirq
+	ldmia   sp!, {r0-r3, ip, lr}
+	str     r5, [sp, #S_PSR]              @ fix the pushed SPSR
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -208,6 +217,23 @@ __dabt_svc:
 UNWIND(.fnend		)
 ENDPROC(__dabt_svc)

+	.macro	svc_exit_irq, rpsr
+	cpsid   i
+	msr	spsr_cxsf, \rpsr
+	mov     r0, \rpsr
+	bl      vmm_on_svc_exit_irq
+#if defined(CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#endif
+	.endm
+
 	.align	5
 __irq_svc:
 	svc_entry
@@ -228,7 +254,7 @@ __irq_svc:
 	@ the first place, so there's no point checking the PSR I bit.
 	bl	trace_hardirqs_on
 #endif
-	svc_exit r5				@ return from exception
+	svc_exit_irq r5				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__irq_svc)

@@ -393,6 +419,8 @@ ENDPROC(__pabt_svc)
 	@
 	zero_fp

+	bl      vmm_switch_nohwirq_to_novirq
+
 #ifdef CONFIG_IRQSOFF_TRACER
 	bl	trace_hardirqs_off
 #endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index a6c301e..325a26e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -349,6 +349,9 @@ ENTRY(vector_swi)
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
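+	@ under VMM: SWI entry masked hardware IRQs; run the kernel with
+	@ virtual IRQs off and hardware IRQs back on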
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	vmm_switch_nohwirq_to_novirq
+	ldmia	sp!, {r0-r3, ip, lr}
 	zero_fp

 	/*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531e..9e438dc 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -75,7 +75,11 @@

 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
-	msr	spsr_cxsf, \rpsr
+	cpsid   i
+	mov     r0, \rpsr
+	bl      vmm_restore_virq_asm            @ restore the IRQ to emulate
+	                                        @ the behavior of ldmia {}^
+	msr	spsr_cxsf, r0
 #if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
@@ -90,6 +94,10 @@

 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	@ protect the spsr *and* the stack: we push the registers onto this
+	@ stack, and while sp does not yet point to the bottom of the frame,
+	@ IRQs must stay disabled.
+	cpsid   i
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6)
@@ -105,6 +113,11 @@
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
 	add	sp, sp, #S_FRAME_SIZE - S_PC
+	@ TODO: in some cases the call to vmm_on_ret_to_usr is unnecessary.
+	stmdb   sp!, {r0-r3, ip, lr}
+	mrs     r0, spsr                        @ debug code
+	bl      vmm_on_ret_to_usr
+	ldmia	sp!, {r0-r3, ip, lr}
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3926f37..252577f 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -23,6 +23,10 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>

+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
 #include "soc.h"
 #include "iomap.h"
 #include "common.h"
@@ -223,6 +227,14 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
 {
 	u32 irqnr;

+#ifdef CONFIG_ARM_VMM
+	if (vmm_get_status())
+	{
+		vmm_irq_handle(base_addr, domain, regs);
+		return;
+	}
+#endif
+
 	do {
 		irqnr = readl_relaxed(base_addr + 0x98);
 		if (irqnr)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f..e76ba74 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -255,6 +255,10 @@ out:
 	return fault;
 }

+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
@@ -268,6 +272,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (notify_page_fault(regs, fsr))
 		return 0;

+#ifdef CONFIG_ARM_VMMX
+	WARN(HOST_VMM_ADDR_BEGIN < regs->ARM_pc &&
+	     regs->ARM_pc < HOST_VMM_ADDR_END, "page fault in VMM region\n");
+#endif
+
 	tsk = current;
 	mm  = tsk->mm;

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1..ebb4e7f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -34,6 +34,10 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>

+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
 #include "mm.h"

 static unsigned long phys_initrd_start __initdata = 0;
@@ -338,6 +342,10 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);

+#ifdef CONFIG_ARM_VMM
+	memblock_reserve(__pa(HOST_VMM_ADDR_BEGIN), HOST_VMM_ADDR_END - HOST_VMM_ADDR_BEGIN);
+#endif
+
 	/* Register the kernel text, kernel data and initrd with memblock. */
 #ifdef CONFIG_XIP_KERNEL
 	memblock_reserve(__pa(_sdata), _end - _sdata);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7..7e7d0ca 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -294,6 +294,20 @@ static struct mem_type mem_types[] = {
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
+#ifdef CONFIG_ARM_VMM
+	[MT_RTVMM] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_RTVMM,
+	},
+	[MT_RTVMM_SHARE] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_RTVMM_SHR,
+	},
+#endif
 };

 const struct mem_type *get_mem_type(unsigned int type)
@@ -450,6 +464,9 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+#ifdef CONFIG_ARM_VMM
+			/* FIXME */
+#endif
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -503,6 +520,12 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+#ifdef CONFIG_ARM_VMM
+	mem_types[MT_RTVMM].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_RTVMM].prot_pte |= kern_pgprot;
+	mem_types[MT_RTVMM_SHARE].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_RTVMM_SHARE].prot_pte |= kern_pgprot;
+#endif
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1152,6 +1175,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #endif

 	/*
+	 * Create mappings for RT-Thread VMM and its shared memory with Linux
+	 */
+#ifdef CONFIG_ARM_VMM
+	/* the TEXCB attribute is not right yet */
+	/* shared memory region comes first */
+	map.pfn = __phys_to_pfn(virt_to_phys((void*)HOST_VMM_ADDR_BEGIN));
+	map.virtual = HOST_VMM_ADDR_BEGIN;
+	map.length = CONFIG_RTVMM_SHARED_SIZE;
+	map.type = MT_RTVMM_SHARE;
+	create_mapping(&map);
+
+	/* vmm private region comes next */
+	map.pfn = __phys_to_pfn(virt_to_phys((void*)HOST_VMM_ADDR_BEGIN
+					     + CONFIG_RTVMM_SHARED_SIZE));
+	map.virtual = HOST_VMM_ADDR_BEGIN + CONFIG_RTVMM_SHARED_SIZE;
+	map.length = CONFIG_HOST_VMM_SIZE - CONFIG_RTVMM_SHARED_SIZE;
+	map.type = MT_RTVMM;
+	create_mapping(&map);
+#endif
+
+	/*
 	 * Create a mapping for the machine vectors at the high-vectors
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
diff --git a/arch/arm/vmm/Kconfig b/arch/arm/vmm/Kconfig
new file mode 100644
index 0000000..d852056
--- /dev/null
+++ b/arch/arm/vmm/Kconfig
@@ -0,0 +1,49 @@
+menu "RT-Thread VMM Features"
+
+# ARM-VMM
+config ARM_VMM
+	bool "Support RT-Thread VMM on ARM Cortex-A8"
+	depends on MACH_REALVIEW_PBA8
+	help
+	  RT-Thread VMM implementation on ARM Cortex-A8
+
+	  Say Y if you want support for the RT-Thread VMM.
+	  Otherwise, say N.
+
+if SOC_AM33XX
+config HOST_VMM_ADDR_END
+	hex "End address of VMM"
+	depends on ARM_VMM
+	default 0xE0000000
+	help
+	  The end address of VMM space. Normally, it's the
+	  end address of DDR memory.
+endif
+
+if MACH_REALVIEW_PBA8
+config HOST_VMM_ADDR_END
+	hex "End address of VMM"
+	depends on ARM_VMM
+	default 0xE0000000
+	help
+	  The end address of VMM space. Normally, it's the
+	  end address of DDR memory.
+endif
+
+config HOST_VMM_SIZE
+	hex "Size of VMM space"
+	depends on ARM_VMM
+	default 0x400000
+	help
+	  The size of VMM space.
+
+config RTVMM_SHARED_SIZE
+	hex "Size of shared memory space between rt-vmm and Linux"
+	depends on ARM_VMM
+	default 0x100000
+	help
+	  The size of the shared memory space between rt-vmm and Linux. This
+	  shared space lies within HOST_VMM_SIZE, so it should be smaller
+	  than HOST_VMM_SIZE.
+
+endmenu
diff --git a/arch/arm/vmm/Makefile b/arch/arm/vmm/Makefile
new file mode 100644
index 0000000..127e43a
--- /dev/null
+++ b/arch/arm/vmm/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux arm-vmm
+#
+
+obj-$(CONFIG_ARM_VMM) += vmm.o vmm_traps.o vmm_virq.o
+
+ifeq ($(CONFIG_ARM_VMM),y)
+obj-$(CONFIG_SOC_AM33XX)		 += am33xx/softirq.o am33xx/virq.o
+obj-$(CONFIG_MACH_REALVIEW_PBA8) += realview_a8/softirq.o
+endif
diff --git a/arch/arm/vmm/README b/arch/arm/vmm/README
new file mode 100644
index 0000000..24f1b42
--- /dev/null
+++ b/arch/arm/vmm/README
@@ -0,0 +1 @@
+Linux VMM kernel routines
diff --git a/arch/arm/vmm/am33xx/intc.h b/arch/arm/vmm/am33xx/intc.h
new file mode 100644
index 0000000..6c24f8d
--- /dev/null
+++ b/arch/arm/vmm/am33xx/intc.h
@@ -0,0 +1,13 @@
+#ifndef __INTC_H__
+#define __INTC_H__
+
+#define OMAP34XX_IC_BASE	0x48200000
+
+#define INTC_SIR_SET0		0x0090
+#define INTC_MIR_CLEAR0		0x0088
+
+#define OMAP2_L4_IO_OFFSET	0xb2000000
+#define OMAP2_L4_IO_ADDRESS(pa)	IOMEM((pa) + OMAP2_L4_IO_OFFSET) /* L4 */
+#define OMAP3_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
+
+#endif
diff --git a/arch/arm/vmm/am33xx/softirq.c b/arch/arm/vmm/am33xx/softirq.c
new file mode 100644
index 0000000..5648496
--- /dev/null
+++ b/arch/arm/vmm/am33xx/softirq.c
@@ -0,0 +1,14 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#include <vmm/vmm.h>
+#include "../vmm_virhw.h"
+#include "intc.h"
+
+void vmm_raise_softirq(int irq)
+{
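+	/* pend the IRQ by writing its bit to the INTC software-interrupt
+	 * SET register */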
+	writel_relaxed(1 << (irq % 32),
+		OMAP3_IRQ_BASE + INTC_SIR_SET0 + (irq / 32) * 4);
+}
+EXPORT_SYMBOL(vmm_raise_softirq);
diff --git a/arch/arm/vmm/am33xx/virq.c b/arch/arm/vmm/am33xx/virq.c
new file mode 100644
index 0000000..4ef7671
--- /dev/null
+++ b/arch/arm/vmm/am33xx/virq.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <vmm/vmm.h>
+#include "../vmm_virhw.h"
+#include "intc.h"
+
+void vmm_irq_handle(void __iomem *base_addr, struct irq_domain *domain,
+				 struct pt_regs *regs)
+{
+	unsigned long flags;
+	struct vmm_context* _vmm_context;
+
+	_vmm_context = vmm_context_get();
+
+	while (_vmm_context->virq_pended) {
+		int index;
+
+		flags = vmm_irq_save();
+		_vmm_context->virq_pended = 0;
+		vmm_irq_restore(flags);
+
+		/* get the pending interrupts */
+		for (index = 0; index < IRQS_NR_32; index++) {
+			int pdbit;
+
+			for (pdbit = __builtin_ffs(_vmm_context->virq_pending[index]);
+			     pdbit != 0;
+			     pdbit = __builtin_ffs(_vmm_context->virq_pending[index])) {
+				unsigned long inner_flag;
+				int irqnr;
+
+				pdbit--;
+
+				inner_flag = vmm_irq_save();
+				_vmm_context->virq_pending[index] &= ~(1 << pdbit);
+				vmm_irq_restore(inner_flag);
+
+				irqnr = irq_find_mapping(domain, pdbit + index * 32);
+				handle_IRQ(irqnr, regs);
+			}
+		}
+	}
+}
diff --git a/arch/arm/vmm/realview_a8/softirq.c b/arch/arm/vmm/realview_a8/softirq.c
new file mode 100644
index 0000000..a52b79c7
--- /dev/null
+++ b/arch/arm/vmm/realview_a8/softirq.c
@@ -0,0 +1,12 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/hardware/gic.h>
+
+#include <vmm/vmm.h>
+
+void vmm_raise_softirq(int irq)
+{
+	gic_raise_softirq(cpumask_of(0), irq);
+}
+EXPORT_SYMBOL(vmm_raise_softirq);
diff --git a/arch/arm/vmm/vmm.c b/arch/arm/vmm/vmm.c
new file mode 100644
index 0000000..3b1d202
--- /dev/null
+++ b/arch/arm/vmm/vmm.c
@@ -0,0 +1,32 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <vmm/vmm.h>
+
+struct vmm_context* _vmm_context = NULL;
+int vmm_status = 0;
+EXPORT_SYMBOL(vmm_status);
+
+void vmm_set_status(int status)
+{
+	vmm_status = status;
+}
+EXPORT_SYMBOL(vmm_set_status);
+
+int vmm_get_status(void)
+{
+	return vmm_status;
+}
+EXPORT_SYMBOL(vmm_get_status);
+
+void vmm_context_init(void* context_addr)
+{
+	_vmm_context = (struct vmm_context*)context_addr;
+}
+EXPORT_SYMBOL(vmm_context_init);
+
+struct vmm_context* vmm_context_get(void)
+{
+	return _vmm_context;
+}
+EXPORT_SYMBOL(vmm_context_get);
diff --git a/arch/arm/vmm/vmm_traps.c b/arch/arm/vmm/vmm_traps.c
new file mode 100644
index 0000000..def0d90
--- /dev/null
+++ b/arch/arm/vmm/vmm_traps.c
@@ -0,0 +1,37 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/traps.h>
+#include <asm/cp15.h>
+#include <asm/cacheflush.h>
+
+void trap_set_vector(void *start, unsigned int length)
+{
+	unsigned char *ptr;
+	unsigned char *vector;
+
+	ptr = start;
+	vector = (unsigned char*)vectors_page;
+
+	/* only set IRQ and FIQ */
+#if defined(CONFIG_CPU_USE_DOMAINS)
+	/* IRQ */
+	memcpy((void *)0xffff0018, (void*)(ptr + 0x18), 4);
+	memcpy((void *)(0xffff0018 + 0x20), (void*)(ptr + 0x18 + 0x20), 4);
+
+	/* FIQ */
+	memcpy((void *)0xffff001C, (void*)(ptr + 0x1C), 4);
+	memcpy((void *)(0xffff001C + 0x20), (void*)(ptr + 0x1C + 0x20), 4);
+#else
+	/* IRQ */
+	memcpy(vector + 0x18, (void*)(ptr + 0x18), 4);
+	memcpy(vector + 0x18 + 0x20, (void*)(ptr + 0x18 + 0x20), 4);
+
+	/* FIQ */
+	memcpy(vector + 0x1C, (void*)(ptr + 0x1C), 4);
+	memcpy(vector + 0x1C + 0x20, (void*)(ptr + 0x1C + 0x20), 4);
+#endif
+	flush_icache_range(0xffff0000, 0xffff0000 + length);
+	if (!vectors_high())
+		flush_icache_range(0x00, 0x00 + length);
+}
+EXPORT_SYMBOL(trap_set_vector);
diff --git a/arch/arm/vmm/vmm_virhw.h b/arch/arm/vmm/vmm_virhw.h
new file mode 100644
index 0000000..363cc6e
--- /dev/null
+++ b/arch/arm/vmm/vmm_virhw.h
@@ -0,0 +1,59 @@
+#ifndef __VMM_VIRTHWH__
+#define __VMM_VIRTHWH__
+
+#define REALVIEW_NR_IRQS        96
+#define IRQS_NR_32              ((REALVIEW_NR_IRQS + 31)/32)
+#define RTT_VMM_IRQ_TRIGGER     10
+
+struct vmm_context
+{
+	/* the vGuest IRQ mask status */
+	volatile unsigned long virq_status;
+
+	/* whether an interrupt is pended for the vGuest OS */
+	volatile unsigned long virq_pended;
+
+	/* pending interrupts for the vGuest OS */
+	volatile unsigned long virq_pending[IRQS_NR_32];
+};
+
+/* IRQ operation under VMM */
+static inline unsigned long vmm_irq_save(void)
+{
+	unsigned long flags;
+
+	asm volatile(
+		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+		"	cpsid	i"
+		: "=r" (flags) : : "memory", "cc");
+	return flags;
+}
+
+static inline void vmm_irq_restore(unsigned long flags)
+{
+	asm volatile(
+		"	msr	cpsr_c, %0	@ local_irq_restore"
+		:
+		: "r" (flags)
+		: "memory", "cc");
+}
+
+static inline void vmm_irq_enable(void)
+{
+	asm volatile(
+		"	cpsie i			@ arch_local_irq_enable"
+		:
+		:
+		: "memory", "cc");
+}
+
+static inline void vmm_irq_disable(void)
+{
+	asm volatile(
+		"	cpsid i			@ arch_local_irq_disable"
+		:
+		:
+		: "memory", "cc");
+}
+
+#endif
diff --git a/arch/arm/vmm/vmm_virq.c b/arch/arm/vmm/vmm_virq.c
new file mode 100644
index 0000000..85886a2
--- /dev/null
+++ b/arch/arm/vmm/vmm_virq.c
@@ -0,0 +1,183 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unwind.h>
+
+#include <vmm/vmm.h>
+
+#include "vmm_virhw.h"
+
+/* VMM uses the I bit in the SPSR to save the virq status on ISR entry, so
+ * warning on a set I bit would give some false negatives. */
+//#define VMM_WARN_ON_I_BIT
+
+extern struct vmm_context* _vmm_context;
+
+void vmm_disable_virq(void)
+{
+	unsigned long flags = vmm_irq_save();
+	_vmm_context->virq_status = 0x01;
+	vmm_irq_restore(flags);
+}
+EXPORT_SYMBOL(vmm_disable_virq);
+
+static void _vmm_raise_on_pended(void)
+{
+	/* check whether any interrupt is pended on the vIRQ */
+	if (_vmm_context->virq_pended) {
+		/* trigger a soft interrupt */
+		vmm_raise_softirq(RTT_VMM_IRQ_TRIGGER);
+		return;
+	}
+
+#if 0
+	int i;
+	for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) {
+		if (_vmm_context->virq_pending[i]) {
+			_vmm_context->virq_pended = 1;
+			pr_info("\n");
+			vmm_raise_softirq(RTT_VMM_IRQ_TRIGGER);
+			return;
+		}
+	}
+#endif
+}
+
+void vmm_enable_virq(void)
+{
+	unsigned long flags = vmm_irq_save();
+	_vmm_context->virq_status = 0x00;
+	_vmm_raise_on_pended();
+	vmm_irq_restore(flags);
+}
+EXPORT_SYMBOL(vmm_enable_virq);
+
+unsigned long vmm_return_virq(void)
+{
+	unsigned long flags;
+	unsigned long level;
+
+	level = vmm_irq_save();
+	flags = _vmm_context->virq_status;
+	vmm_irq_restore(level);
+
+	return flags;
+}
+EXPORT_SYMBOL(vmm_return_virq);
+
+unsigned long vmm_save_virq(void)
+{
+	int status;
+	unsigned long flags = vmm_irq_save();
+
+	status = _vmm_context->virq_status;
+	_vmm_context->virq_status = 0x01;
+	vmm_irq_restore(flags);
+
+	return status;
+}
+EXPORT_SYMBOL(vmm_save_virq);
+
+void vmm_restore_virq(unsigned long flags)
+{
+	unsigned long level;
+
+	level = vmm_irq_save();
+	_vmm_context->virq_status = flags;
+	if (_vmm_context->virq_status == 0)
+	{
+		_vmm_raise_on_pended();
+	}
+	vmm_irq_restore(level);
+}
+EXPORT_SYMBOL(vmm_restore_virq);
+
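+/* Called from svc_entry: fold the virtual IRQ mask into the SPSR value that
+ * is saved on the stack, so the stacked state looks as if cpsid had really
+ * executed. */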
+unsigned long vmm_save_virq_spsr_asm(unsigned long spsr, struct pt_regs *regs)
+{
+	if (vmm_status) {
+		if (_vmm_context->virq_status)
+			return spsr | PSR_I_BIT;
+	}
+	return spsr;
+}
+
+void irq_enable_asm(void)
+{
+	if (vmm_status) {
+		vmm_enable_virq();
+	} else {
+		asm volatile("cpsie i" : : : "memory", "cc");
+	}
+}
+
+void irq_disable_asm(void)
+{
+	if (vmm_status) {
+		vmm_disable_virq();
+	} else {
+		asm volatile("cpsid i" : : : "memory", "cc");
+	}
+}
+
+/* Should be called when the guest enters a state in which the IRQ is
+ * disabled by hardware, for example on entry to SVC, PABT or DABT mode.
+ *
+ * It re-enables the hardware IRQ; the virtual IRQ state remains unchanged.
+ */
+void vmm_switch_nohwirq_to_novirq(void)
+{
+	if (vmm_status) {
+		vmm_disable_virq();
+		asm volatile("cpsie i" : : : "memory", "cc");
+	}
+}
+
+unsigned long vmm_restore_virq_asm(unsigned long spsr)
+{
+	if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+		WARN(spsr & PSR_I_BIT, "return to svc mode with I in SPSR set\n");
+#endif
+		vmm_restore_virq(!!(spsr & PSR_I_BIT));
+		return spsr & ~PSR_I_BIT;
+	} else {
+		return spsr;
+	}
+}
+
+void vmm_on_ret_to_usr(unsigned long spsr)
+{
+	if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+		WARN(spsr & PSR_I_BIT, "return to user mode with I in SPSR set\n");
+#endif
+		vmm_enable_virq();
+	}
+}
+
+void vmm_on_svc_exit_irq(unsigned long spsr)
+{
+	if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+		WARN(spsr & PSR_I_BIT, "exit IRQ with I in SPSR set\n");
+#endif
+		vmm_enable_virq();
+	}
+}
+
+void vmm_dump_irq(void)
+{
+	int i;
+	unsigned long cpsr;
+
+	asm volatile ("mrs %0, cpsr": "=r"(cpsr));
+
+	printk("status: %08lx, pended: %08lx, cpsr: %08lx\n",
+	       _vmm_context->virq_status, _vmm_context->virq_pended, cpsr);
+	printk("pending: ");
+	for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) {
+		printk("%08lx, ", _vmm_context->virq_pending[i]);
+	}
+	printk("\n");
+}
+
--
1.8.4
