/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/base/metatrace.h"

#include "perfetto/base/compiler.h"
#include "perfetto/base/task_runner.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/thread_annotations.h"

namespace perfetto {
namespace metatrace {

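// g_enabled_tags is the global on/off switch: a non-zero value means
// meta-tracing is enabled for the given tags. g_enabled_timestamp records the
// TraceTimeNowNs() timestamp at which tracing was last enabled.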
std::atomic<uint32_t> g_enabled_tags{0};
std::atomic<uint64_t> g_enabled_timestamp{0};

// static members
std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
std::atomic<bool> RingBuffer::read_task_queued_;
std::atomic<uint64_t> RingBuffer::wr_index_;
std::atomic<uint64_t> RingBuffer::rd_index_;
std::atomic<bool> RingBuffer::has_overruns_;
Record RingBuffer::bankruptcy_record_;

namespace {

// std::function<> is not trivially de/constructible. This wrapper struct is
// lazily heap-allocated to avoid static initializers.
struct Delegate {
  static Delegate* GetInstance() {
    static Delegate* instance = new Delegate();
    return instance;
  }

  base::TaskRunner* task_runner = nullptr;
  std::function<void()> read_task;
};

}  // namespace

bool Enable(std::function<void()> read_task,
            base::TaskRunner* task_runner,
            uint32_t tags) {
  PERFETTO_DCHECK(read_task);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
  if (g_enabled_tags.load(std::memory_order_acquire))
    return false;

  Delegate* dg = Delegate::GetInstance();
  dg->task_runner = task_runner;
  dg->read_task = std::move(read_task);
  RingBuffer::Reset();
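  // Store the tags last, with release semantics: threads that observe
  // g_enabled_tags as non-zero are then guaranteed to also see the timestamp
  // and the Delegate set-up above.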
  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
  g_enabled_tags.store(tags, std::memory_order_release);
  return true;
}

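// Stops meta-tracing by clearing the enabled tags and dropping the delegate
// state. Records already written to the ring buffer are left untouched until
// the next RingBuffer::Reset().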
void Disable() {
  g_enabled_tags.store(0, std::memory_order_release);
  Delegate* dg = Delegate::GetInstance();
  PERFETTO_DCHECK(!dg->task_runner ||
                  dg->task_runner->RunsTasksOnCurrentThread());
  dg->task_runner = nullptr;
  dg->read_task = nullptr;
}

// static
void RingBuffer::Reset() {
  bankruptcy_record_.clear();
  for (Record& record : records_)
    record.clear();
  wr_index_ = 0;
  rd_index_ = 0;
  has_overruns_ = false;
  read_task_queued_ = false;
}

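// Reserves the next Record slot for the calling thread. Once the buffer fills
// past half capacity, a drain task is posted to the registered TaskRunner; on
// a full overrun the shared bankruptcy record is returned instead.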
// static
Record* RingBuffer::AppendNewRecord() {
  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);

  // rd_index can only monotonically increase; we don't care if we read an
  // older value, we'll just hit the slow-path a bit earlier if that happens.
  auto rd_index = rd_index_.load(std::memory_order_relaxed);

  PERFETTO_DCHECK(wr_index >= rd_index);
  auto size = wr_index - rd_index;
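  // Fast path: below half capacity just hand out the slot. The early threshold
  // leaves the reader some headroom to drain the buffer before writers start
  // overrunning it.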
  if (PERFETTO_LIKELY(size < kCapacity / 2))
    return At(wr_index);

  // Slow-path: enqueue the read task and handle overruns.
  bool expected = false;
  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
    Delegate* dg = Delegate::GetInstance();
    if (dg->task_runner) {
      dg->task_runner->PostTask([] {
        // Meta-tracing might have been disabled in the meantime.
        auto read_task = Delegate::GetInstance()->read_task;
        if (read_task)
          read_task();
        RingBuffer::read_task_queued_ = false;
      });
    }
  }

  if (PERFETTO_LIKELY(size < kCapacity))
    return At(wr_index);

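  // The ring is full: flag the overrun and roll back the index reservation
  // taken above; the caller is handed the shared bankruptcy record instead.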
  has_overruns_.store(true, std::memory_order_release);
  wr_index_.fetch_sub(1, std::memory_order_acq_rel);

  // In the case of overruns, threads will race writing to the same memory
  // location and TSan will rightly complain. This is fine though, because
  // nobody reads the bankruptcy record and it's designed to contain garbage.
  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
                                      "nothing reads bankruptcy_record_")
  return &bankruptcy_record_;
}

// static
bool RingBuffer::IsOnValidTaskRunner() {
  auto* task_runner = Delegate::GetInstance()->task_runner;
  return task_runner && task_runner->RunsTasksOnCurrentThread();
}

}  // namespace metatrace
}  // namespace perfetto