/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
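	 *
	 * For example, assuming CPU_MAX_PWR_DWN_OPS is 2 (core- and
	 * cluster-level handlers), a request for power level 0 invokes the
	 * core-level handler, while any level of 1 or above is clamped to
	 * index 1 and invokes the cluster-level handler.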
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 * Return :
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure.
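	 *
	 * For example, a Cortex-A53 r0p4 reports a MIDR of 0x410FD034;
	 * masking with CPU_IMPL_PN_MASK keeps only the implementer (0x41)
	 * and part number (0xD03) fields, so 0x4100D030 is compared against
	 * each entry's MIDR, regardless of variant and revision.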
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * in r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero-fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the function indicates that the errata applies; otherwise it
 * does not.
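 *
 * For example, cpu_get_rev_var packs an r2p1 part as 0x21; calling this
 * function with r1 = 0x21 then yields ERRATA_APPLIES for r2p1 and all
 * earlier revisions, and ERRATA_NOT_APPLIES for r2p2 onwards.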
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the function indicates that the errata applies; otherwise it
 * does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 * - with the MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data, so retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If the function is non-NULL, jump to it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
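	 *
	 * errata_needs_reporting returns non-zero only for the first CPU of
	 * the class to take the lock, so the report is printed exactly once
	 * per CPU class.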
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif
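
/*
 * Illustrative sketch (hypothetical, not part of this file): a cpu_ops
 * implementation typically pairs the helpers above in a per-erratum check
 * function, e.g. for an erratum affecting revisions up to and including
 * r0p0:
 *
 *	func check_errata_xxxxxx
 *		mov	r1, #0x00
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xxxxxx
 *
 * with the caller first obtaining the current revision-variant in r0 via
 * cpu_get_rev_var.
 */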