/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

#define	FIXUP_SIZE	((BL32_LIMIT) - (BL32_BASE))

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor. Set
	 * SCR.FIQ so that FIQs are routed to Monitor mode, and clear
	 * SCR.FW so that the Non-secure world cannot mask them.
	 * -----------------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive monitor state
	 * when entering Monitor mode.
	 */
	clrex
#endif
	.endm
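/*
 * Monitor vector table layout note: on Armv7-A, only SMC, external aborts
 * (when routed via SCR.EA), IRQ and FIQ exceptions can be taken to Monitor
 * mode, so the remaining slots should never be reached and are wired to the
 * platform panic handler. Hardware never uses the first slot for exception
 * entry, which lets SP_MIN reuse it as a branch to the cold boot entrypoint.
 */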
vector_base	sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure. Save r0-r3 so that they can be passed to
	 * the platform layer after the common entrypoint code has run.
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer.
	 * ---------------------------------------------------------------------
	 */
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r9, #0
	mov	r10, #0
	mov	r11, #0
	mov	r12, #0

#endif /* RESET_TO_SP_MIN */
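	/*
	 * The r9-r12 values set up above (relayed arguments, or zeroes for
	 * RESET_TO_SP_MIN) are handed to the platform layer below. For
	 * reference, the C-side hooks are assumed to have the following
	 * prototypes, as declared in upstream TF-A's platform headers
	 * (verify against the headers in your tree):
	 *
	 *   void sp_min_early_platform_setup2(u_register_t arg0,
	 *			u_register_t arg1, u_register_t arg2,
	 *			u_register_t arg3);
	 *   void sp_min_plat_arch_setup(void);
	 *   void sp_min_main(void);
	 */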
#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Call the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint
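/*
 * Context convention assumed by the exception handlers below: while in
 * Monitor mode, `sp` points at the current CPU's `smc_ctx_t` save area
 * rather than at a conventional stack, and the SMC_CTX_* offsets from
 * smccc_helpers.h are used to spill and reload registers. The C runtime
 * stack pointer is itself stashed in the context at SMC_CTX_SP_MON and is
 * swapped into `sp` only once the incoming context has been saved.
 */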
/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it on top of the C runtime stack.
	 * The value will be saved to the per-cpu data once the C stack is
	 * available, as a valid stack is needed to call _cpu_data().
	 */
	strd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
	ldcopr16	r0, r1, CNTPCT_64
	ldr	lr, [sp, #SMC_CTX_SP_MON]
	strd	r0, r1, [lr, #-8]!
	str	lr, [sp, #SMC_CTX_SP_MON]
	ldrd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
#endif

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save handle to a callee saved register */
	mov	r6, r2

	/*
	 * Restore the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	ldrd	r4, r5, [sp], #8
	bl	_cpu_data
	strd	r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]

	/* Restore handle */
	mov	r2, r6
#endif

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure state */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 function ID was used */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* Not an SMC32 call: return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 call detected: dispatch it to the runtime service layer */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc
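/*
 * For reference, the runtime service dispatcher invoked above is assumed to
 * have the following prototype, as declared in upstream TF-A's
 * runtime_svc.h (verify against your tree):
 *
 *   uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie,
 *				  void *handle, unsigned int flags);
 *
 * It routes the SMC to the service owning the function ID and returns a
 * pointer to the context to restore, which is why `r0` can be passed
 * straight to sp_min_exit.
 */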
/*
 * Secure interrupt handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the C runtime stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure state */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq
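/*
 * A note on the push/pop pair in sp_min_handle_fiq: pushing both r2 and r3
 * keeps the stack 8-byte aligned across the call, as the AAPCS requires,
 * while preserving the context pointer; popping it back into r0 makes it
 * the argument to sp_min_exit. The C-level handler is assumed to look
 * roughly like the sketch below, based on upstream TF-A's sp_min_main.c
 * (verify against your tree):
 *
 *   void sp_min_fiq(void)
 *   {
 *	uint32_t id = plat_ic_acknowledge_interrupt();
 *	sp_min_plat_fiq_handler(id);
 *	plat_ic_end_of_interrupt(id);
 *   }
 */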
/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	ldcopr16	r2, r3, CNTPCT_64
	strd	r2, r3, [r0]
#endif
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot: we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment:
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=0

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif
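	/*
	 * The warm boot hook and context fetch below are C functions. Their
	 * assumed prototypes, as in upstream TF-A (verify against your
	 * tree), are:
	 *
	 *   void sp_min_warm_boot(void);   - runs the PSCI warm boot path
	 *				      and prepares the next context
	 *   void *smc_get_next_ctx(void);  - returns the `smc_ctx_t` to be
	 *				      restored on exit
	 */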
	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save smc_ctx_t */
	mov	r5, r0

	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	r4, r0

	/*
	 * Invalidate before updating the timestamp to ensure that previous
	 * timestamp updates on the same cache line, made with caches disabled,
	 * are properly seen by the same core. Without the cache invalidate,
	 * the core might write into a stale cache line.
	 */
	mov	r1, #PMF_TS_SIZE
	bl	inv_dcache_range

	ldcopr16	r0, r1, CNTPCT_64
	strd	r0, r1, [r4]

	/* Restore smc_ctx_t */
	mov	r0, r5
#endif

	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context and return to the mode
 * indicated by the saved SPSR.
 *
 * Arguments: r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit