1 /*
2 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3 * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8 /*
9 * Top-level SMC handler for Versal power management calls and
10 * IPI setup functions for communication with PMC.
11 */
12
13 #include <errno.h>
14 #include <stdbool.h>
15
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17
18 #include <common/runtime_svc.h>
19 #include <drivers/arm/gicv3.h>
20 #include <lib/psci/psci.h>
21 #include <plat/arm/common/plat_arm.h>
22 #include <plat/common/platform.h>
23
24 #include <plat_private.h>
25 #include "pm_api_sys.h"
26 #include "pm_client.h"
27 #include "pm_ipi.h"
28 #include "pm_svc_main.h"
29
/* GICD_IROUTER Interrupt Routing Mode bit: route IRQ in 1-of-N mode */
#define MODE 0x80000000U

#define XSCUGIC_SGIR_EL1_INITID_SHIFT 24U
/* Sentinel value meaning "no callback SGI registered by the rich OS" */
#define INVALID_SGI 0xFFU
/* PMC callback API IDs delivered through the IPI payload word 0 */
#define PM_INIT_SUSPEND_CB (30U)
#define PM_NOTIFY_CB (32U)
/* PM_NOTIFY_CB event value (payload word 2) requesting CPU power-down */
#define EVENT_CPU_PWRDWN (4U)
/* SGI used to hand non-PMC IPI traffic to the NS mailbox driver */
#define MBOX_SGI_SHARED_IPI (7U)

/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT (1000U)
/* Accessors for ICC_ASGI1R_EL1 (SGI generation for the other security state) */
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
/* SGI number registered by the rich OS for PM callbacks (INVALID_SGI if none) */
static uint32_t sgi = (uint32_t)INVALID_SGI;
/* Latched on first EVENT_CPU_PWRDWN; the second event triggers the power-down */
bool pwrdwn_req_received;
47
notify_os(void)48 static void notify_os(void)
49 {
50 plat_ic_raise_ns_sgi(sgi, read_mpidr_el1());
51 }
52
cpu_pwrdwn_req_handler(uint32_t id,uint32_t flags,void * handle,void * cookie)53 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
54 void *handle, void *cookie)
55 {
56 uint32_t cpu_id = plat_my_core_pos();
57
58 VERBOSE("Powering down CPU %d\n", cpu_id);
59
60 /* Deactivate CPU power down SGI */
61 plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
62
63 return psci_cpu_off();
64 }
65
66 /**
67 * raise_pwr_down_interrupt() - Callback function to raise SGI.
68 * @mpidr: MPIDR for the target CPU.
69 *
70 * Raise SGI interrupt to trigger the CPU power down sequence on all the
71 * online secondary cores.
72 */
raise_pwr_down_interrupt(u_register_t mpidr)73 static void raise_pwr_down_interrupt(u_register_t mpidr)
74 {
75 plat_ic_raise_el3_sgi(CPU_PWR_DOWN_REQ_INTR, mpidr);
76 }
77
request_cpu_pwrdwn(void)78 void request_cpu_pwrdwn(void)
79 {
80 enum pm_ret_status ret;
81
82 VERBOSE("CPU power down request received\n");
83
84 /* Send powerdown request to online secondary core(s) */
85 ret = psci_stop_other_cores(PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
86 if (ret != PSCI_E_SUCCESS) {
87 ERROR("Failed to powerdown secondary core(s)\n");
88 }
89
90 /* Clear IPI IRQ */
91 pm_ipi_irq_clear(primary_proc);
92
93 /* Deactivate IPI IRQ */
94 plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
95 }
96
/**
 * ipi_fiq_handler() - EL3 FIQ handler for the shared IPI interrupt.
 * @id: Interrupt ID of the IPI FIQ.
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to caller's context structure (unused).
 * @cookie: Opaque cookie (unused).
 *
 * Demultiplexes the shared IPI FIQ: pending IPIs from agents other than the
 * PMC are handed to the non-secure mailbox driver via SGI, while PMC
 * callbacks are fetched here and dispatched (OS notification via the
 * registered SGI, or the two-phase CPU power-down sequence).
 *
 * Return: Always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	int ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	/* Acknowledge the FIQ; the returned INTID is not needed here */
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if (ipi_status & IPI_MB_STATUS_RECV_PENDING) {
			plat_ic_raise_ns_sgi(MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0) {
		plat_ic_end_of_interrupt(id);
		return 0;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Let the error code fall through to the dispatch switch below */
		payload[0] = ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Forward suspend-init callback only if the OS registered an SGI */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				/*
				 * First EVENT_CPU_PWRDWN only arms the request
				 * (and notifies the OS below); the second one
				 * powers down the secondaries and this CPU.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		}
		break;
	case PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

	return 0;
}
170
171 /**
172 * pm_register_sgi() - PM register the IPI interrupt.
173 * @sgi_num: SGI number to be used for communication.
174 * @reset: Reset to invalid SGI when reset=1.
175 *
176 * Return: On success, the initialization function must return 0.
177 * Any other return value will cause the framework to ignore
178 * the service.
179 *
180 * Update the SGI number to be used.
181 *
182 */
pm_register_sgi(uint32_t sgi_num,uint32_t reset)183 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
184 {
185 if (reset == 1U) {
186 sgi = INVALID_SGI;
187 return 0;
188 }
189
190 if (sgi != INVALID_SGI) {
191 return -EBUSY;
192 }
193
194 if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
195 return -EINVAL;
196 }
197
198 sgi = (uint32_t)sgi_num;
199 return 0;
200 }
201
202 /**
203 * pm_setup() - PM service setup.
204 *
205 * Return: On success, the initialization function must return 0.
206 * Any other return value will cause the framework to ignore
207 * the service.
208 *
209 * Initialization functions for Versal power management for
210 * communicaton with PMC.
211 *
212 * Called from sip_svc_setup initialization function with the
213 * rt_svc_init signature.
214 *
215 */
pm_setup(void)216 int32_t pm_setup(void)
217 {
218 int32_t ret = 0;
219
220 pm_ipi_init(primary_proc);
221 pm_up = true;
222
223 /* register SGI handler for CPU power down request */
224 ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
225 if (ret != 0) {
226 WARN("BL31: registering SGI interrupt failed\n");
227 }
228
229 /*
230 * Enable IPI IRQ
231 * assume the rich OS is OK to handle callback IRQs now.
232 * Even if we were wrong, it would not enable the IRQ in
233 * the GIC.
234 */
235 pm_ipi_irq_enable(primary_proc);
236
237 ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
238 if (ret != 0) {
239 WARN("BL31: registering IPI interrupt failed\n");
240 }
241
242 gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);
243 return ret;
244 }
245
246 /**
247 * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
248 * @api_id: identifier for the API being called.
249 * @pm_arg: pointer to the argument data for the API call.
250 * @handle: Pointer to caller's context structure.
251 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
252 *
253 * Return: If EEMI API found then, uintptr_t type address, else 0.
254 *
255 * Some EEMI API's use case needs to be changed in Linux driver, so they
256 * can take advantage of common EEMI handler in TF-A. As of now the old
257 * implementation of these APIs are required to maintain backward compatibility
258 * until their use case in linux driver changes.
259 *
260 */
eemi_for_compatibility(uint32_t api_id,uint32_t * pm_arg,void * handle,uint32_t security_flag)261 static uintptr_t eemi_for_compatibility(uint32_t api_id, uint32_t *pm_arg,
262 void *handle, uint32_t security_flag)
263 {
264 enum pm_ret_status ret;
265
266 switch (api_id) {
267
268 case (uint32_t)PM_IOCTL:
269 {
270 uint32_t value = 0U;
271
272 ret = pm_api_ioctl(pm_arg[0], pm_arg[1], pm_arg[2],
273 pm_arg[3], pm_arg[4],
274 &value, security_flag);
275 if (ret == PM_RET_ERROR_NOTSUPPORTED)
276 return (uintptr_t)0;
277
278 SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32U);
279 }
280
281 case (uint32_t)PM_QUERY_DATA:
282 {
283 uint32_t data[PAYLOAD_ARG_CNT] = { 0 };
284
285 ret = pm_query_data(pm_arg[0], pm_arg[1], pm_arg[2],
286 pm_arg[3], data, security_flag);
287
288 SMC_RET2(handle, (uint64_t)ret | ((uint64_t)data[0] << 32U),
289 (uint64_t)data[1] | ((uint64_t)data[2] << 32U));
290 }
291
292 case (uint32_t)PM_FEATURE_CHECK:
293 {
294 uint32_t result[PAYLOAD_ARG_CNT] = {0U};
295
296 ret = pm_feature_check(pm_arg[0], result, security_flag);
297 SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
298 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
299 }
300
301 case PM_LOAD_PDI:
302 {
303 ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
304 security_flag);
305 SMC_RET1(handle, (uint64_t)ret);
306 }
307
308 default:
309 return (uintptr_t)0;
310 }
311 }
312
313 /**
314 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
315 * @api_id: identifier for the API being called.
316 * @pm_arg: pointer to the argument data for the API call.
317 * @handle: Pointer to caller's context structure.
318 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
319 *
320 * These EEMI APIs performs CPU specific power management tasks.
321 * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
322 * These calls require CPU specific processing before sending IPI request to
323 * Platform Management Controller. For example enable/disable CPU specific
324 * interrupts. This requires separate handler for these calls and may not be
325 * handled using common eemi handler.
326 *
327 * Return: If EEMI API found then, uintptr_t type address, else 0.
328 *
329 */
eemi_psci_debugfs_handler(uint32_t api_id,uint32_t * pm_arg,void * handle,uint32_t security_flag)330 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
331 void *handle, uint32_t security_flag)
332 {
333 enum pm_ret_status ret;
334
335 switch (api_id) {
336
337 case (uint32_t)PM_SELF_SUSPEND:
338 ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
339 pm_arg[3], security_flag);
340 SMC_RET1(handle, (u_register_t)ret);
341
342 case (uint32_t)PM_FORCE_POWERDOWN:
343 ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag);
344 SMC_RET1(handle, (u_register_t)ret);
345
346 case (uint32_t)PM_REQ_SUSPEND:
347 ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
348 pm_arg[3], security_flag);
349 SMC_RET1(handle, (u_register_t)ret);
350
351 case (uint32_t)PM_ABORT_SUSPEND:
352 ret = pm_abort_suspend(pm_arg[0], security_flag);
353 SMC_RET1(handle, (u_register_t)ret);
354
355 case (uint32_t)PM_SYSTEM_SHUTDOWN:
356 ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
357 SMC_RET1(handle, (u_register_t)ret);
358
359 default:
360 return (uintptr_t)0;
361 }
362 }
363
364 /**
365 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
366 * @api_id: identifier for the API being called.
367 * @pm_arg: pointer to the argument data for the API call.
368 * @handle: Pointer to caller's context structure.
369 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
370 *
371 * These EEMI calls performs functionality that does not require
372 * IPI transaction. The handler ends in TF-A and returns requested data to
373 * kernel from TF-A.
374 *
375 * Return: If TF-A specific API found then, uintptr_t type address, else 0
376 *
377 */
TF_A_specific_handler(uint32_t api_id,uint32_t * pm_arg,void * handle,uint32_t security_flag)378 static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
379 void *handle, uint32_t security_flag)
380 {
381 switch (api_id) {
382
383 case TF_A_PM_REGISTER_SGI:
384 {
385 int32_t ret;
386
387 ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
388 if (ret != 0) {
389 SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
390 }
391
392 SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
393 }
394
395 case PM_GET_CALLBACK_DATA:
396 {
397 uint32_t result[4] = {0};
398 enum pm_ret_status ret;
399
400 ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
401 if (ret != 0) {
402 result[0] = ret;
403 }
404
405 SMC_RET2(handle,
406 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
407 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
408 }
409
410 case PM_GET_TRUSTZONE_VERSION:
411 SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
412 ((uint64_t)TZ_VERSION << 32U));
413
414 default:
415 return (uintptr_t)0;
416 }
417 }
418
419 /**
420 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
421 * @api_id: identifier for the API being called.
422 * @pm_arg: pointer to the argument data for the API call.
423 * @handle: Pointer to caller's context structure.
424 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
425 *
426 * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
427 * to allow communication between power management controller and different
428 * processing clusters.
429 *
430 * This handler prepares EEMI protocol payload received from kernel and performs
431 * IPI transaction.
432 *
433 * Return: If EEMI API found then, uintptr_t type address, else 0
434 *
435 */
eemi_handler(uint32_t api_id,uint32_t * pm_arg,void * handle,uint32_t security_flag)436 static uintptr_t eemi_handler(uint32_t api_id, uint32_t *pm_arg,
437 void *handle, uint32_t security_flag)
438 {
439 enum pm_ret_status ret;
440 uint32_t buf[PAYLOAD_ARG_CNT] = {0};
441
442 ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
443 pm_arg[2], pm_arg[3], pm_arg[4],
444 (uint64_t *)buf);
445 /*
446 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
447 * receives 5 words of respoonse from firmware. Currently linux driver can
448 * receive only 4 words from TF-A. So, this needs to be handled separately
449 * than other eemi calls.
450 */
451 if (api_id == (uint32_t)PM_QUERY_DATA) {
452 if ((pm_arg[0] == XPM_QID_CLOCK_GET_NAME ||
453 pm_arg[0] == XPM_QID_PINCTRL_GET_FUNCTION_NAME) &&
454 ret == PM_RET_SUCCESS) {
455 SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
456 (uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
457 }
458 }
459
460 SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
461 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
462 }
463
464 /**
465 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
466 * @smc_fid: Function Identifier.
467 * @x1: SMC64 Arguments from kernel.
468 * @x2: SMC64 Arguments from kernel.
469 * @x3: SMC64 Arguments from kernel (upper 32-bits).
470 * @x4: Unused.
471 * @cookie: Unused.
472 * @handle: Pointer to caller's context structure.
473 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
474 *
475 * Return: Unused.
476 *
477 * Determines that smc_fid is valid and supported PM SMC Function ID from the
478 * list of pm_api_ids, otherwise completes the request with
479 * the unknown SMC Function ID.
480 *
481 * The SMC calls for PM service are forwarded from SIP Service SMC handler
482 * function with rt_svc_handle signature.
483 *
484 */
pm_smc_handler(uint32_t smc_fid,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,const void * cookie,void * handle,uint64_t flags)485 uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
486 uint64_t x4, const void *cookie, void *handle, uint64_t flags)
487 {
488 uintptr_t ret;
489 uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
490 uint32_t security_flag = NON_SECURE_FLAG;
491 uint32_t api_id;
492 bool status = false, status_tmp = false;
493
494 /* Handle case where PM wasn't initialized properly */
495 if (pm_up == false) {
496 SMC_RET1(handle, SMC_UNK);
497 }
498
499 /*
500 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
501 * if smc called is secure
502 *
503 * Add redundant macro call to immune the code from glitches
504 */
505 SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
506 if ((status != false) && (status_tmp != false)) {
507 security_flag = SECURE_FLAG;
508 }
509
510 pm_arg[0] = (uint32_t)x1;
511 pm_arg[1] = (uint32_t)(x1 >> 32U);
512 pm_arg[2] = (uint32_t)x2;
513 pm_arg[3] = (uint32_t)(x2 >> 32U);
514 pm_arg[4] = (uint32_t)x3;
515 (void)(x4);
516 api_id = smc_fid & FUNCID_NUM_MASK;
517
518 ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
519 if (ret != (uintptr_t)0) {
520 return ret;
521 }
522
523 ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, flags);
524 if (ret != (uintptr_t)0) {
525 return ret;
526 }
527
528 ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
529 if (ret != (uintptr_t)0) {
530 return ret;
531 }
532
533 ret = eemi_handler(api_id, pm_arg, handle, security_flag);
534
535 return ret;
536 }
537