/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
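
	/*
	 * The _warm_boot_mailbox and _secondary_cold_boot arguments above
	 * are consumed by el3_entrypoint_common (see el3_common_macros.S):
	 * with _warm_boot_mailbox=1 the macro queries
	 * plat_get_my_entrypoint() and branches to the returned address on
	 * a warm reset, and with _secondary_cold_boot=1 it parks secondary
	 * CPUs via plat_secondary_cold_boot_setup(). Both checks are
	 * therefore elided when PROGRAMMABLE_RESET_ADDRESS and
	 * COLD_BOOT_SINGLE_CPU are set, respectively.
	 */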

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main
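
	/*
	 * bl31_main() initialises the runtime services and populates the
	 * CPU context of the next image to run; el3_exit below restores
	 * that context and performs the exception return out of EL3.
	 */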

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * --------------------------------------------------------------------
	 */
	adrp	x0, __DATA_START__
	add	x0, x0, :lo12:__DATA_START__
	adrp	x1, __DATA_END__
	add	x1, x1, :lo12:__DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warm boot entrypoint.
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *    programming the reset address do we need to initialise SCTLR_EL3.
	 *    In other cases, we assume this has been taken care of by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=0
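
	/*
	 * _pie_fixup_size is 0 here because, when BL31 is built as PIE,
	 * the relocation fixups have already been applied once on the
	 * cold boot path and must not be applied again.
	 */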

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr
#else
	mov	x0, #DISABLE_DCACHE
#endif
	bl	bl31_plat_enable_mmu
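
	/*
	 * x0 is the flags argument of bl31_plat_enable_mmu(); passing
	 * DISABLE_DCACHE requests that the MMU be enabled with the data
	 * cache still off (SCTLR_EL3.C left clear), in line with the
	 * rationale above. The cache is enabled later, once this CPU has
	 * been brought into coherency.
	 */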

#if ENABLE_RME
	/*
	 * At warm boot GPT data structures have already been initialized in RAM
	 * but the sysregs for this CPU need to be initialized. Note that the GPT
	 * accesses are controlled by attributes in GPCCR and do not depend on
	 * the SCR_EL3.C bit.
	 */
	bl	gpt_enable
	cbz	x0, 1f
	no_ret	plat_panic_handler
1:
#endif

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint