// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/cpufreq_monitor_android.h"

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/functional/bind.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/trace_event.h"

namespace base {

namespace trace_event {

namespace {

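// A scaling_cur_freq file holds a single frequency in kHz, so 32 bytes is
// comfortably more than one reading ever needs.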
const size_t kNumBytesToReadForSampling = 32;
constexpr const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("power");
const char kEventTitle[] = "CPU Frequency";

}  // namespace

CPUFreqMonitorDelegate::CPUFreqMonitorDelegate() = default;

std::string CPUFreqMonitorDelegate::GetScalingCurFreqPathString(
    unsigned int cpu_id) const {
  return base::StringPrintf(
      "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", cpu_id);
}

bool CPUFreqMonitorDelegate::IsTraceCategoryEnabled() const {
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
  return enabled;
}

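// kernel_max holds the highest possible CPU index on this system, not a CPU
// count, so a return value of 0 still means CPU0 exists.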
unsigned int CPUFreqMonitorDelegate::GetKernelMaxCPUs() const {
  std::string str;
  if (!base::ReadFileToString(
          base::FilePath("/sys/devices/system/cpu/kernel_max"), &str)) {
    // If we fail to read the kernel_max file, return a max index of 0 and
    // just assume that CPU0 exists.
    return 0;
  }

  unsigned int kernel_max_cpu = 0;
  base::StringToUint(str, &kernel_max_cpu);
  return kernel_max_cpu;
}

std::string CPUFreqMonitorDelegate::GetRelatedCPUsPathString(
    unsigned int cpu_id) const {
  return base::StringPrintf(
      "/sys/devices/system/cpu/cpu%u/cpufreq/related_cpus", cpu_id);
}

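// Collects one representative CPU ID per cpufreq cluster. CPUs listed in a
// cluster's related_cpus file share frequency state, so sampling one of them
// is enough.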
void CPUFreqMonitorDelegate::GetCPUIds(std::vector<unsigned int>* ids) const {
  ids->clear();
  unsigned int kernel_max_cpu = GetKernelMaxCPUs();
  // CPUs related to one that's already marked for monitoring get set to
  // "false" so we don't needlessly monitor CPUs with redundant frequency
  // information.
  std::vector<char> cpus_to_monitor(kernel_max_cpu + 1, 1);

  // Rule out the related CPUs for each one so we only end up with the CPUs
  // that are representative of the cluster.
  for (unsigned int i = 0; i <= kernel_max_cpu; i++) {
    if (!cpus_to_monitor[i])
      continue;

    std::string filename = GetRelatedCPUsPathString(i);
    std::string line;
    if (!base::ReadFileToString(base::FilePath(filename), &line))
      continue;
    // When reading the related_cpus file, we expect the format to be
    // something like "0 1 2 3" for CPU0-3 if they're all in one cluster.
    for (auto& str_piece :
         base::SplitString(line, " ", base::WhitespaceHandling::TRIM_WHITESPACE,
                           base::SplitResult::SPLIT_WANT_NONEMPTY)) {
      unsigned int cpu_id;
      if (base::StringToUint(str_piece, &cpu_id)) {
        if (cpu_id != i && cpu_id <= kernel_max_cpu)
          cpus_to_monitor[cpu_id] = 0;
      }
    }
    ids->push_back(i);
  }

  // If none of the files were readable, we assume CPU0 exists and fall back
  // to using that.
  if (ids->empty())
    ids->push_back(0);
}

void CPUFreqMonitorDelegate::RecordFrequency(unsigned int cpu_id,
                                             unsigned int freq) {
  TRACE_COUNTER_ID1(kTraceCategory, kEventTitle, cpu_id, freq);
}

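// Sampling reads sysfs files and may block, and dropping samples at shutdown
// is acceptable, hence MayBlock, BEST_EFFORT, and SKIP_ON_SHUTDOWN.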
scoped_refptr<SingleThreadTaskRunner>
CPUFreqMonitorDelegate::CreateTaskRunner() {
  return base::ThreadPool::CreateSingleThreadTaskRunner(
      {base::MayBlock(), base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
       base::TaskPriority::BEST_EFFORT},
      base::SingleThreadTaskRunnerThreadMode::SHARED);
}

CPUFreqMonitor::CPUFreqMonitor()
    : CPUFreqMonitor(std::make_unique<CPUFreqMonitorDelegate>()) {}

CPUFreqMonitor::CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate)
    : delegate_(std::move(delegate)) {}

CPUFreqMonitor::~CPUFreqMonitor() {
  Stop();
}

// static
CPUFreqMonitor* CPUFreqMonitor::GetInstance() {
  static base::NoDestructor<CPUFreqMonitor> instance;
  return instance.get();
}

void CPUFreqMonitor::OnTraceLogEnabled() {
  GetOrCreateTaskRunner()->PostTask(
      FROM_HERE,
      base::BindOnce(&CPUFreqMonitor::Start, weak_ptr_factory_.GetWeakPtr()));
}

void CPUFreqMonitor::OnTraceLogDisabled() {
  Stop();
}

void CPUFreqMonitor::Start() {
  // It's the responsibility of the caller to ensure that Start/Stop are
  // synchronized. If Start/Stop are called asynchronously where this value
  // may be incorrect, we have bigger problems.
  if (is_enabled_.load(std::memory_order_relaxed) ||
      !delegate_->IsTraceCategoryEnabled()) {
    return;
  }

  std::vector<unsigned int> cpu_ids;
  delegate_->GetCPUIds(&cpu_ids);

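  // Open a file descriptor per monitored CPU up front and keep it open for
  // the lifetime of the sampling loop, so each sample only pays for a seek
  // and a read.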
  std::vector<std::pair<unsigned int, base::ScopedFD>> fds;
  for (unsigned int id : cpu_ids) {
    std::string fstr = delegate_->GetScalingCurFreqPathString(id);
    int fd = open(fstr.c_str(), O_RDONLY);
    if (fd == -1)
      continue;

    fds.emplace_back(id, base::ScopedFD(fd));
  }
  // If we couldn't open any scaling_cur_freq files, there's nothing to
  // sample.
  if (fds.empty())
    return;

  is_enabled_.store(true, std::memory_order_release);

  GetOrCreateTaskRunner()->PostTask(
      FROM_HERE,
      base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
                     std::move(fds)));
}

void CPUFreqMonitor::Stop() {
  is_enabled_.store(false, std::memory_order_release);
}

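// Reads each monitored CPU's scaling_cur_freq, emits a counter event for it,
// and reposts itself until Stop() clears is_enabled_.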
void CPUFreqMonitor::Sample(
    std::vector<std::pair<unsigned int, base::ScopedFD>> fds) {
  // For the same reason as above, we use relaxed ordering: even with acquire
  // ordering, a value caught in transition would keep the original Sample
  // tasks alive until the next Stop, so it's still the responsibility of
  // callers to synchronize Start/Stop.
  if (!is_enabled_.load(std::memory_order_relaxed))
    return;

  for (auto& id_fd : fds) {
    int fd = id_fd.second.get();
    unsigned int freq = 0;
    // If we have trouble reading data from the file for any reason, we report
    // the frequency as zero.
    lseek(fd, 0L, SEEK_SET);
    // Reserve one extra byte so the buffer can always be null-terminated.
    char data[kNumBytesToReadForSampling + 1];

    ssize_t bytes_read = read(fd, data, kNumBytesToReadForSampling);
    if (bytes_read > 0) {
      data[static_cast<size_t>(bytes_read)] = '\0';
      int ret = sscanf(data, "%u", &freq);
      if (ret == 0 || ret == EOF)
        freq = 0;
    }

    delegate_->RecordFrequency(id_fd.first, freq);
  }

  GetOrCreateTaskRunner()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
                     std::move(fds)),
      base::Milliseconds(kDefaultCPUFreqSampleIntervalMs));
}

bool CPUFreqMonitor::IsEnabledForTesting() {
  return is_enabled_.load(std::memory_order_acquire);
}

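// Lazily creates the delegate's task runner the first time it's needed and
// reuses it for all subsequent posts.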
const scoped_refptr<SingleThreadTaskRunner>&
CPUFreqMonitor::GetOrCreateTaskRunner() {
  if (!task_runner_)
    task_runner_ = delegate_->CreateTaskRunner();
  return task_runner_;
}

}  // namespace trace_event
}  // namespace base