/*
 * Copyright (c) 2013, Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <asm.h>
#include <lib/sm/monitor.h>
#include <lib/sm/smcall.h>
#include <lib/sm/sm_err.h>

#include <kernel/vm.h>

#if !LIB_SM_CUSTOM_SCHED_NONSECURE
/* sm_sched_nonsecure(uint32_t retval, struct smc32_args *args) */
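/*
 * Returns control to the non-secure world (via platform_ns_return or an
 * SMC_SC_NS_RETURN smc) carrying retval; once the non-secure side issues
 * its next SMC into the secure world, the new call's registers are copied
 * into *args before returning to the caller.
 */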
FUNCTION(sm_sched_nonsecure)
	push	{r4-r7}			/* save callee-saved registers */
	push	{r1, lr}		/* save args pointer and return address */
	mov	r1, r0			/* move retval into r1 for the return path */

#if LIB_SM_WITH_PLATFORM_NS_RETURN
	bl	platform_ns_return
#else
	ldr	r0, =SMC_SC_NS_RETURN
	smc	#0
#endif
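	/*
	 * Execution resumes here when the non-secure side traps back in
	 * with its next SMC; the incoming call is in r0-r3 and r7.
	 */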

	pop	{r12, lr}		/* r12 = saved args pointer */

	/* Write struct smc32_args (top bits of client_id are left unchanged) */
	stmia	r12, {r0-r3, r7}

	pop	{r4-r7}

	bx	lr
#endif

#if LIB_SM_WITH_PLATFORM_RESET
FUNCTION(sm_reset)
#else
FUNCTION(platform_reset)
#endif
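/*
 * Secure-side reset entry. Runs from the physical load address before the
 * MMU is enabled: it (optionally) initializes the monitor, sends secondary
 * cpus straight to arm_reset, and patches the dynamic mmu_initial_mappings
 * entry with the memory size passed in by the bootloader (r0) before
 * continuing through arm_reset.
 */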
#if WITH_LIB_SM_MONITOR
	cps	#MODE_MON
	bl	monitor_reset
	cps	#MODE_SVC
#endif
#if WITH_SMP
	/* figure out our cpu number */
	mrc     p15, 0, ip, c0, c0, 5 /* read MPIDR */

	/* mask off the bottom bits to test cluster number:cpu number */
	ubfx    ip, ip, #0, #SMP_CPU_ID_BITS

	/* if we're not cpu 0:0, jump back to arm_reset */
	cmp     ip, #0
	bne     arm_reset
#endif

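/*
 * Compute the delta between the physical address we are executing from and
 * the virtual address we were linked at: adr yields the pc-relative
 * (physical) address of .Lphys_offset, while the literal-pool load yields
 * its link-time virtual address.
 */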
.Lphys_offset:
	adr	sp, .Lphys_offset	/* sp = paddr */
	ldr	ip, =.Lphys_offset	/* ip = vaddr */
	sub	ip, sp, ip		/* ip = phys_offset */

	/* patch mmu_initial_mappings table */
	ldr	r5, =mmu_initial_mappings
	add	r5, r5, ip	/* r5 = _mmu_initial_mappings paddr */

.Lnext_entry:
	/* if size == 0, end of list */
	ldr	r4, [r5, #__MMU_INITIAL_MAPPING_SIZE_OFFSET]
	cmp	r4, #0
	beq	.Lall_done

	ldr	r4, [r5, #__MMU_INITIAL_MAPPING_FLAGS_OFFSET]
	tst	r4, #MMU_INITIAL_MAPPING_FLAG_DYNAMIC
	addeq	r5, #__MMU_INITIAL_MAPPING_SIZE
	beq	.Lnext_entry

	/* patching dynamic entry: r5 - points to entry to patch */
	/* r0 is memsize passed in by the bootloader */

	/* update size field of mmu_initial_mappings struct */
	str	r0, [r5, #__MMU_INITIAL_MAPPING_SIZE_OFFSET]

	/* calculate phys mem base */
	ldr	r4, =_start	/* r4 = _start vaddr */
	add     r4, r4, ip      /* r4 = _start paddr */

	/* update phys field of mmu_initial_mappings struct */
	str	r4, [r5, #__MMU_INITIAL_MAPPING_PHYS_OFFSET]

.Lall_done:
	b	arm_reset