1*54fd6939SJiyong Park /*
2*54fd6939SJiyong Park * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3*54fd6939SJiyong Park *
4*54fd6939SJiyong Park * SPDX-License-Identifier: BSD-3-Clause
5*54fd6939SJiyong Park */
6*54fd6939SJiyong Park
7*54fd6939SJiyong Park #include <assert.h>
8*54fd6939SJiyong Park #include <errno.h>
9*54fd6939SJiyong Park
10*54fd6939SJiyong Park #include <platform_def.h>
11*54fd6939SJiyong Park
12*54fd6939SJiyong Park #include <arch_helpers.h>
13*54fd6939SJiyong Park #include <common/debug.h>
14*54fd6939SJiyong Park #include <drivers/arm/cci.h>
15*54fd6939SJiyong Park #include <drivers/console.h>
16*54fd6939SJiyong Park #include <lib/bakery_lock.h>
17*54fd6939SJiyong Park #include <lib/mmio.h>
18*54fd6939SJiyong Park #include <lib/psci/psci.h>
19*54fd6939SJiyong Park
20*54fd6939SJiyong Park #include <mcucfg.h>
21*54fd6939SJiyong Park #include <plat_private.h>
22*54fd6939SJiyong Park #include <power_tracer.h>
23*54fd6939SJiyong Park #include <scu.h>
24*54fd6939SJiyong Park
/*
 * Per-CPU context saved across a power-down.
 * timer_data holds the generic timer registers saved by
 * mt_save_generic_timer(): 6 x 64-bit values (48 bytes) are written, so the
 * 8-entry array leaves two spare slots.
 */
struct core_context {
	unsigned long timer_data[8];	/* saved cntkctl/cntp/cntv registers */
	unsigned int count;		/* NOTE(review): bookkeeping counters; not */
	unsigned int rst;		/* read or written in this file — presumably */
	unsigned int abt;		/* used by other platform code. Confirm */
	unsigned int brk;		/* before removing. */
};
32*54fd6939SJiyong Park
/* Per-cluster context: one core_context slot per cpu in the cluster. */
struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};
36*54fd6939SJiyong Park
/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};
43*54fd6939SJiyong Park
/*
 * Top level structure which encapsulates the context of the entire system.
 * Single instance; indexed per-cluster/per-cpu via the helpers below.
 */
static struct system_context dormant_data[1];
system_cluster(struct system_context * system,uint32_t clusterid)49*54fd6939SJiyong Park static inline struct cluster_context *system_cluster(
50*54fd6939SJiyong Park struct system_context *system,
51*54fd6939SJiyong Park uint32_t clusterid)
52*54fd6939SJiyong Park {
53*54fd6939SJiyong Park return &system->cluster[clusterid];
54*54fd6939SJiyong Park }
55*54fd6939SJiyong Park
cluster_core(struct cluster_context * cluster,uint32_t cpuid)56*54fd6939SJiyong Park static inline struct core_context *cluster_core(struct cluster_context *cluster,
57*54fd6939SJiyong Park uint32_t cpuid)
58*54fd6939SJiyong Park {
59*54fd6939SJiyong Park return &cluster->core[cpuid];
60*54fd6939SJiyong Park }
61*54fd6939SJiyong Park
get_cluster_data(unsigned long mpidr)62*54fd6939SJiyong Park static struct cluster_context *get_cluster_data(unsigned long mpidr)
63*54fd6939SJiyong Park {
64*54fd6939SJiyong Park uint32_t clusterid;
65*54fd6939SJiyong Park
66*54fd6939SJiyong Park clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
67*54fd6939SJiyong Park
68*54fd6939SJiyong Park return system_cluster(dormant_data, clusterid);
69*54fd6939SJiyong Park }
70*54fd6939SJiyong Park
get_core_data(unsigned long mpidr)71*54fd6939SJiyong Park static struct core_context *get_core_data(unsigned long mpidr)
72*54fd6939SJiyong Park {
73*54fd6939SJiyong Park struct cluster_context *cluster;
74*54fd6939SJiyong Park uint32_t cpuid;
75*54fd6939SJiyong Park
76*54fd6939SJiyong Park cluster = get_cluster_data(mpidr);
77*54fd6939SJiyong Park cpuid = mpidr & MPIDR_CPU_MASK;
78*54fd6939SJiyong Park
79*54fd6939SJiyong Park return cluster_core(cluster, cpuid);
80*54fd6939SJiyong Park }
81*54fd6939SJiyong Park
/*
 * Save this cpu's generic timer registers into 'container' before power-down.
 *
 * Six 64-bit values are stored (48 bytes — core_context.timer_data[8] is
 * large enough), pairwise via stp at byte offsets 0/16/32:
 *   [%2, #0]  : cntkctl_el1,   cntp_cval_el0
 *   [%2, #16] : cntp_tval_el0, cntp_ctl_el0
 *   [%2, #32] : cntv_tval_el0, cntv_ctl_el0
 * Restored in the same layout by mt_restore_generic_timer().
 */
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
108*54fd6939SJiyong Park
/*
 * Restore this cpu's generic timer registers from 'container' after wake-up.
 * Reads back the exact layout written by mt_save_generic_timer():
 * pairs at offsets 0/16/32 holding cntkctl_el1 + cntp_cval_el0,
 * cntp_tval_el0 + cntp_ctl_el0, and cntv_tval_el0 + cntv_ctl_el0.
 * Restoring cntp_ctl_el0 last in its pair re-enables the physical timer
 * that stop_generic_timer() disabled before power-down.
 */
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
135*54fd6939SJiyong Park
/*
 * Disable the EL1 physical timer to prevent spurious interrupts on this
 * cpu interface while it is powered down. It will bite us when we come
 * back if we don't. The timer state is replayed on the inbound cluster.
 */
static void stop_generic_timer(void)
{
	write_cntp_ctl_el0(clr_cntp_ctl_enable(read_cntp_ctl_el0()));
}
148*54fd6939SJiyong Park
mt_cpu_save(unsigned long mpidr)149*54fd6939SJiyong Park static void mt_cpu_save(unsigned long mpidr)
150*54fd6939SJiyong Park {
151*54fd6939SJiyong Park struct core_context *core;
152*54fd6939SJiyong Park
153*54fd6939SJiyong Park core = get_core_data(mpidr);
154*54fd6939SJiyong Park mt_save_generic_timer(core->timer_data);
155*54fd6939SJiyong Park
156*54fd6939SJiyong Park /* disable timer irq, and upper layer should enable it again. */
157*54fd6939SJiyong Park stop_generic_timer();
158*54fd6939SJiyong Park }
159*54fd6939SJiyong Park
mt_cpu_restore(unsigned long mpidr)160*54fd6939SJiyong Park static void mt_cpu_restore(unsigned long mpidr)
161*54fd6939SJiyong Park {
162*54fd6939SJiyong Park struct core_context *core;
163*54fd6939SJiyong Park
164*54fd6939SJiyong Park core = get_core_data(mpidr);
165*54fd6939SJiyong Park mt_restore_generic_timer(core->timer_data);
166*54fd6939SJiyong Park }
167*54fd6939SJiyong Park
/* Platform-level save hook; currently only the mcusys/cpu context. */
static void mt_platform_save_context(unsigned long mpidr)
{
	mt_cpu_save(mpidr);
}
173*54fd6939SJiyong Park
/* Platform-level restore hook; currently only the mcusys/cpu context. */
static void mt_platform_restore_context(unsigned long mpidr)
{
	mt_cpu_restore(mpidr);
}
179*54fd6939SJiyong Park
180*54fd6939SJiyong Park /*******************************************************************************
181*54fd6939SJiyong Park * Private function which is used to determine if any platform actions
182*54fd6939SJiyong Park * should be performed for the specified affinity instance given its
183*54fd6939SJiyong Park * state. Nothing needs to be done if the 'state' is not off or if this is not
184*54fd6939SJiyong Park * the highest affinity level which will enter the 'state'.
185*54fd6939SJiyong Park *******************************************************************************/
plat_do_plat_actions(unsigned int afflvl,unsigned int state)186*54fd6939SJiyong Park static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
187*54fd6939SJiyong Park {
188*54fd6939SJiyong Park unsigned int max_phys_off_afflvl;
189*54fd6939SJiyong Park
190*54fd6939SJiyong Park assert(afflvl <= MPIDR_AFFLVL2);
191*54fd6939SJiyong Park
192*54fd6939SJiyong Park if (state != PSCI_STATE_OFF)
193*54fd6939SJiyong Park return -EAGAIN;
194*54fd6939SJiyong Park
195*54fd6939SJiyong Park /*
196*54fd6939SJiyong Park * Find the highest affinity level which will be suspended and postpone
197*54fd6939SJiyong Park * all the platform specific actions until that level is hit.
198*54fd6939SJiyong Park */
199*54fd6939SJiyong Park max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
200*54fd6939SJiyong Park assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
201*54fd6939SJiyong Park if (afflvl != max_phys_off_afflvl)
202*54fd6939SJiyong Park return -EAGAIN;
203*54fd6939SJiyong Park
204*54fd6939SJiyong Park return 0;
205*54fd6939SJiyong Park }
206*54fd6939SJiyong Park
207*54fd6939SJiyong Park /*******************************************************************************
208*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance is about to enter
209*54fd6939SJiyong Park * standby.
210*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_standby(unsigned int power_state)211*54fd6939SJiyong Park static void plat_affinst_standby(unsigned int power_state)
212*54fd6939SJiyong Park {
213*54fd6939SJiyong Park unsigned int target_afflvl;
214*54fd6939SJiyong Park
215*54fd6939SJiyong Park /* Sanity check the requested state */
216*54fd6939SJiyong Park target_afflvl = psci_get_pstate_afflvl(power_state);
217*54fd6939SJiyong Park
218*54fd6939SJiyong Park /*
219*54fd6939SJiyong Park * It's possible to enter standby only on affinity level 0 i.e. a cpu
220*54fd6939SJiyong Park * on the MTK_platform. Ignore any other affinity level.
221*54fd6939SJiyong Park */
222*54fd6939SJiyong Park if (target_afflvl == MPIDR_AFFLVL0) {
223*54fd6939SJiyong Park /*
224*54fd6939SJiyong Park * Enter standby state. dsb is good practice before using wfi
225*54fd6939SJiyong Park * to enter low power states.
226*54fd6939SJiyong Park */
227*54fd6939SJiyong Park dsb();
228*54fd6939SJiyong Park wfi();
229*54fd6939SJiyong Park }
230*54fd6939SJiyong Park }
231*54fd6939SJiyong Park
232*54fd6939SJiyong Park /*******************************************************************************
233*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance is about to be turned
234*54fd6939SJiyong Park * on. The level and mpidr determine the affinity instance.
235*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_on(unsigned long mpidr,unsigned long sec_entrypoint,unsigned int afflvl,unsigned int state)236*54fd6939SJiyong Park static int plat_affinst_on(unsigned long mpidr,
237*54fd6939SJiyong Park unsigned long sec_entrypoint,
238*54fd6939SJiyong Park unsigned int afflvl,
239*54fd6939SJiyong Park unsigned int state)
240*54fd6939SJiyong Park {
241*54fd6939SJiyong Park int rc = PSCI_E_SUCCESS;
242*54fd6939SJiyong Park unsigned long cpu_id;
243*54fd6939SJiyong Park unsigned long cluster_id;
244*54fd6939SJiyong Park uintptr_t rv;
245*54fd6939SJiyong Park
246*54fd6939SJiyong Park /*
247*54fd6939SJiyong Park * It's possible to turn on only affinity level 0 i.e. a cpu
248*54fd6939SJiyong Park * on the MTK_platform. Ignore any other affinity level.
249*54fd6939SJiyong Park */
250*54fd6939SJiyong Park if (afflvl != MPIDR_AFFLVL0)
251*54fd6939SJiyong Park return rc;
252*54fd6939SJiyong Park
253*54fd6939SJiyong Park cpu_id = mpidr & MPIDR_CPU_MASK;
254*54fd6939SJiyong Park cluster_id = mpidr & MPIDR_CLUSTER_MASK;
255*54fd6939SJiyong Park
256*54fd6939SJiyong Park if (cluster_id)
257*54fd6939SJiyong Park rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
258*54fd6939SJiyong Park else
259*54fd6939SJiyong Park rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
260*54fd6939SJiyong Park
261*54fd6939SJiyong Park mmio_write_32(rv, sec_entrypoint);
262*54fd6939SJiyong Park INFO("mt_on[%ld:%ld], entry %x\n",
263*54fd6939SJiyong Park cluster_id, cpu_id, mmio_read_32(rv));
264*54fd6939SJiyong Park
265*54fd6939SJiyong Park return rc;
266*54fd6939SJiyong Park }
267*54fd6939SJiyong Park
268*54fd6939SJiyong Park /*******************************************************************************
269*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance is about to be turned
270*54fd6939SJiyong Park * off. The level and mpidr determine the affinity instance. The 'state' arg.
271*54fd6939SJiyong Park * allows the platform to decide whether the cluster is being turned off and
272*54fd6939SJiyong Park * take apt actions.
273*54fd6939SJiyong Park *
274*54fd6939SJiyong Park * CAUTION: This function is called with coherent stacks so that caches can be
275*54fd6939SJiyong Park * turned off, flushed and coherency disabled. There is no guarantee that caches
276*54fd6939SJiyong Park * will remain turned on across calls to this function as each affinity level is
277*54fd6939SJiyong Park * dealt with. So do not write & read global variables across calls. It will be
278*54fd6939SJiyong Park * wise to do flush a write to the global to prevent unpredictable results.
279*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_off(unsigned int afflvl,unsigned int state)280*54fd6939SJiyong Park static void plat_affinst_off(unsigned int afflvl, unsigned int state)
281*54fd6939SJiyong Park {
282*54fd6939SJiyong Park unsigned long mpidr = read_mpidr_el1();
283*54fd6939SJiyong Park
284*54fd6939SJiyong Park /* Determine if any platform actions need to be executed. */
285*54fd6939SJiyong Park if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
286*54fd6939SJiyong Park return;
287*54fd6939SJiyong Park
288*54fd6939SJiyong Park /* Prevent interrupts from spuriously waking up this cpu */
289*54fd6939SJiyong Park plat_mt_gic_cpuif_disable();
290*54fd6939SJiyong Park
291*54fd6939SJiyong Park trace_power_flow(mpidr, CPU_DOWN);
292*54fd6939SJiyong Park
293*54fd6939SJiyong Park if (afflvl != MPIDR_AFFLVL0) {
294*54fd6939SJiyong Park /* Disable coherency if this cluster is to be turned off */
295*54fd6939SJiyong Park plat_cci_disable();
296*54fd6939SJiyong Park
297*54fd6939SJiyong Park trace_power_flow(mpidr, CLUSTER_DOWN);
298*54fd6939SJiyong Park }
299*54fd6939SJiyong Park }
300*54fd6939SJiyong Park
301*54fd6939SJiyong Park /*******************************************************************************
302*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance is about to be
303*54fd6939SJiyong Park * suspended. The level and mpidr determine the affinity instance. The 'state'
304*54fd6939SJiyong Park * arg. allows the platform to decide whether the cluster is being turned off
305*54fd6939SJiyong Park * and take apt actions.
306*54fd6939SJiyong Park *
307*54fd6939SJiyong Park * CAUTION: This function is called with coherent stacks so that caches can be
308*54fd6939SJiyong Park * turned off, flushed and coherency disabled. There is no guarantee that caches
309*54fd6939SJiyong Park * will remain turned on across calls to this function as each affinity level is
310*54fd6939SJiyong Park * dealt with. So do not write & read global variables across calls. It will be
311*54fd6939SJiyong Park * wise to do flush a write to the global to prevent unpredictable results.
312*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_suspend(unsigned long sec_entrypoint,unsigned int afflvl,unsigned int state)313*54fd6939SJiyong Park static void plat_affinst_suspend(unsigned long sec_entrypoint,
314*54fd6939SJiyong Park unsigned int afflvl,
315*54fd6939SJiyong Park unsigned int state)
316*54fd6939SJiyong Park {
317*54fd6939SJiyong Park unsigned long mpidr = read_mpidr_el1();
318*54fd6939SJiyong Park unsigned long cluster_id;
319*54fd6939SJiyong Park unsigned long cpu_id;
320*54fd6939SJiyong Park uintptr_t rv;
321*54fd6939SJiyong Park
322*54fd6939SJiyong Park /* Determine if any platform actions need to be executed. */
323*54fd6939SJiyong Park if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
324*54fd6939SJiyong Park return;
325*54fd6939SJiyong Park
326*54fd6939SJiyong Park cpu_id = mpidr & MPIDR_CPU_MASK;
327*54fd6939SJiyong Park cluster_id = mpidr & MPIDR_CLUSTER_MASK;
328*54fd6939SJiyong Park
329*54fd6939SJiyong Park if (cluster_id)
330*54fd6939SJiyong Park rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
331*54fd6939SJiyong Park else
332*54fd6939SJiyong Park rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
333*54fd6939SJiyong Park
334*54fd6939SJiyong Park mmio_write_32(rv, sec_entrypoint);
335*54fd6939SJiyong Park
336*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL0)
337*54fd6939SJiyong Park mt_platform_save_context(mpidr);
338*54fd6939SJiyong Park
339*54fd6939SJiyong Park /* Perform the common cluster specific operations */
340*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL1) {
341*54fd6939SJiyong Park /* Disable coherency if this cluster is to be turned off */
342*54fd6939SJiyong Park plat_cci_disable();
343*54fd6939SJiyong Park disable_scu(mpidr);
344*54fd6939SJiyong Park
345*54fd6939SJiyong Park trace_power_flow(mpidr, CLUSTER_SUSPEND);
346*54fd6939SJiyong Park }
347*54fd6939SJiyong Park
348*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL2) {
349*54fd6939SJiyong Park /* Prevent interrupts from spuriously waking up this cpu */
350*54fd6939SJiyong Park plat_mt_gic_cpuif_disable();
351*54fd6939SJiyong Park }
352*54fd6939SJiyong Park }
353*54fd6939SJiyong Park
354*54fd6939SJiyong Park /*******************************************************************************
355*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance has just been powered
356*54fd6939SJiyong Park * on after being turned off earlier. The level and mpidr determine the affinity
357*54fd6939SJiyong Park * instance. The 'state' arg. allows the platform to decide whether the cluster
358*54fd6939SJiyong Park * was turned off prior to wakeup and do what's necessary to setup it up
359*54fd6939SJiyong Park * correctly.
360*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_on_finish(unsigned int afflvl,unsigned int state)361*54fd6939SJiyong Park static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
362*54fd6939SJiyong Park {
363*54fd6939SJiyong Park unsigned long mpidr = read_mpidr_el1();
364*54fd6939SJiyong Park
365*54fd6939SJiyong Park /* Determine if any platform actions need to be executed. */
366*54fd6939SJiyong Park if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
367*54fd6939SJiyong Park return;
368*54fd6939SJiyong Park
369*54fd6939SJiyong Park /* Perform the common cluster specific operations */
370*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL1) {
371*54fd6939SJiyong Park enable_scu(mpidr);
372*54fd6939SJiyong Park
373*54fd6939SJiyong Park /* Enable coherency if this cluster was off */
374*54fd6939SJiyong Park plat_cci_enable();
375*54fd6939SJiyong Park trace_power_flow(mpidr, CLUSTER_UP);
376*54fd6939SJiyong Park }
377*54fd6939SJiyong Park
378*54fd6939SJiyong Park /* Enable the gic cpu interface */
379*54fd6939SJiyong Park plat_mt_gic_cpuif_enable();
380*54fd6939SJiyong Park plat_mt_gic_pcpu_init();
381*54fd6939SJiyong Park trace_power_flow(mpidr, CPU_UP);
382*54fd6939SJiyong Park }
383*54fd6939SJiyong Park
384*54fd6939SJiyong Park /*******************************************************************************
385*54fd6939SJiyong Park * MTK_platform handler called when an affinity instance has just been powered
386*54fd6939SJiyong Park * on after having been suspended earlier. The level and mpidr determine the
387*54fd6939SJiyong Park * affinity instance.
388*54fd6939SJiyong Park ******************************************************************************/
plat_affinst_suspend_finish(unsigned int afflvl,unsigned int state)389*54fd6939SJiyong Park static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
390*54fd6939SJiyong Park {
391*54fd6939SJiyong Park unsigned long mpidr = read_mpidr_el1();
392*54fd6939SJiyong Park
393*54fd6939SJiyong Park /* Determine if any platform actions need to be executed. */
394*54fd6939SJiyong Park if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
395*54fd6939SJiyong Park return;
396*54fd6939SJiyong Park
397*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL2) {
398*54fd6939SJiyong Park /* Enable the gic cpu interface */
399*54fd6939SJiyong Park plat_mt_gic_init();
400*54fd6939SJiyong Park plat_mt_gic_cpuif_enable();
401*54fd6939SJiyong Park }
402*54fd6939SJiyong Park
403*54fd6939SJiyong Park /* Perform the common cluster specific operations */
404*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL1) {
405*54fd6939SJiyong Park enable_scu(mpidr);
406*54fd6939SJiyong Park
407*54fd6939SJiyong Park /* Enable coherency if this cluster was off */
408*54fd6939SJiyong Park plat_cci_enable();
409*54fd6939SJiyong Park trace_power_flow(mpidr, CLUSTER_UP);
410*54fd6939SJiyong Park }
411*54fd6939SJiyong Park
412*54fd6939SJiyong Park if (afflvl >= MPIDR_AFFLVL0)
413*54fd6939SJiyong Park mt_platform_restore_context(mpidr);
414*54fd6939SJiyong Park
415*54fd6939SJiyong Park plat_mt_gic_pcpu_init();
416*54fd6939SJiyong Park }
417*54fd6939SJiyong Park
/* Compose the power state used for SYSTEM_SUSPEND requests. */
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
423*54fd6939SJiyong Park
/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
/*
 * System off is not actually implemented here: log, park in wfi, and panic
 * if the cpu ever resumes execution. Never returns.
 */
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");
	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}
434*54fd6939SJiyong Park
/*
 * Reboot via the MTK watchdog: switch it out of dual/irq mode, arm the
 * external reset path (with the mode key), then trigger a software reset.
 * The wfi parks the cpu until the watchdog fires; reaching panic() means
 * the reset never happened. Never returns.
 */
static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
449*54fd6939SJiyong Park
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby = plat_affinst_standby,
	.affinst_on = plat_affinst_on,
	.affinst_off = plat_affinst_off,
	.affinst_suspend = plat_affinst_suspend,
	.affinst_on_finish = plat_affinst_on_finish,
	.affinst_suspend_finish = plat_affinst_suspend_finish,
	.system_off = plat_system_off,
	.system_reset = plat_system_reset,
	.get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};
464*54fd6939SJiyong Park
/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
/* Hand the pm ops table to the psci framework; always succeeds (returns 0). */
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
474