/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/unistd.h>
#include <asm/vdso/cp15.h>
#include <uapi/linux/time.h>

#define VDSO_HAS_CLOCK_GETRES 1

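/*
 * Syscall fallbacks, used when the vDSO cannot serve the request itself.
 * Following the ARM EABI syscall convention, arguments are placed in
 * r0/r1, the syscall number in r7, and "swi #0" traps into the kernel;
 * the return value comes back in r0.
 */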
static __always_inline int gettimeofday_fallback(
                struct __kernel_old_timeval *_tv,
                struct timezone *_tz)
{
        register struct timezone *tz asm("r1") = _tz;
        register struct __kernel_old_timeval *tv asm("r0") = _tv;
        register long ret asm ("r0");
        register long nr asm("r7") = __NR_gettimeofday;

        asm volatile(
        " swi #0\n"
        : "=r" (ret)
        : "r" (tv), "r" (tz), "r" (nr)
        : "memory");

        return ret;
}

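/* Fallback via the clock_gettime64 syscall (64-bit __kernel_timespec). */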
static __always_inline long clock_gettime_fallback(
                clockid_t _clkid,
                struct __kernel_timespec *_ts)
{
        register struct __kernel_timespec *ts asm("r1") = _ts;
        register clockid_t clkid asm("r0") = _clkid;
        register long ret asm ("r0");
        register long nr asm("r7") = __NR_clock_gettime64;

        asm volatile(
        " swi #0\n"
        : "=r" (ret)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "memory");

        return ret;
}

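/* Legacy fallback via the 32-bit clock_gettime syscall (old_timespec32). */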
static __always_inline long clock_gettime32_fallback(
                clockid_t _clkid,
                struct old_timespec32 *_ts)
{
        register struct old_timespec32 *ts asm("r1") = _ts;
        register clockid_t clkid asm("r0") = _clkid;
        register long ret asm ("r0");
        register long nr asm("r7") = __NR_clock_gettime;

        asm volatile(
        " swi #0\n"
        : "=r" (ret)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "memory");

        return ret;
}

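/* Fallback via the clock_getres_time64 syscall (64-bit __kernel_timespec). */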
static __always_inline int clock_getres_fallback(
                clockid_t _clkid,
                struct __kernel_timespec *_ts)
{
        register struct __kernel_timespec *ts asm("r1") = _ts;
        register clockid_t clkid asm("r0") = _clkid;
        register long ret asm ("r0");
        register long nr asm("r7") = __NR_clock_getres_time64;

        asm volatile(
        " swi #0\n"
        : "=r" (ret)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "memory");

        return ret;
}

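/* Legacy fallback via the 32-bit clock_getres syscall (old_timespec32). */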
static __always_inline int clock_getres32_fallback(
                clockid_t _clkid,
                struct old_timespec32 *_ts)
{
        register struct old_timespec32 *ts asm("r1") = _ts;
        register clockid_t clkid asm("r0") = _clkid;
        register long ret asm ("r0");
        register long nr asm("r7") = __NR_clock_getres;

        asm volatile(
        " swi #0\n"
        : "=r" (ret)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "memory");

        return ret;
}

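/*
 * High-resolution clocks can only be served from the vDSO when the ARM
 * architected timer is available.
 */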
static inline bool arm_vdso_hres_capable(void)
{
        return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
}
#define __arch_vdso_hres_capable arm_vdso_hres_capable

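/*
 * Read the raw cycle count from the virtual counter (CNTVCT). The isb()
 * prevents the counter read from being speculated ahead of earlier
 * instructions.
 */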
static __always_inline u64 __arch_get_hw_counter(int clock_mode,
                                                 const struct vdso_data *vd)
{
#ifdef CONFIG_ARM_ARCH_TIMER
        u64 cycle_now;

        /*
         * The core code checks the clock mode already, so getting here with
         * VDSO_CLOCKMODE_NONE means we raced against a concurrent update.
         * Return something; the core will do another round, see the mode
         * change and fall back to the syscall.
         */
        if (clock_mode == VDSO_CLOCKMODE_NONE)
                return 0;

        isb();
        cycle_now = read_sysreg(CNTVCT);

        return cycle_now;
#else
        /* Make GCC happy. This is compiled out anyway */
        return 0;
#endif
}

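/* Return the kernel-maintained vDSO data page mapped into userspace. */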
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
        return _vdso_data;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */