xref: /aosp_15_r20/external/cronet/base/trace_event/trace_log.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/trace_event/trace_log.h"
6 
7 #include <cmath>
8 #include <limits>
9 #include <memory>
10 #include <string_view>
11 #include <unordered_set>
12 #include <utility>
13 
14 #include "base/auto_reset.h"
15 #include "base/base_switches.h"
16 #include "base/command_line.h"
17 #include "base/containers/contains.h"
18 #include "base/debug/leak_annotations.h"
19 #include "base/format_macros.h"
20 #include "base/functional/bind.h"
21 #include "base/location.h"
22 #include "base/logging.h"
23 #include "base/memory/ptr_util.h"
24 #include "base/memory/raw_ptr.h"
25 #include "base/memory/raw_ptr_exclusion.h"
26 #include "base/memory/ref_counted_memory.h"
27 #include "base/no_destructor.h"
28 #include "base/notreached.h"
29 #include "base/process/process.h"
30 #include "base/process/process_metrics.h"
31 #include "base/ranges/algorithm.h"
32 #include "base/strings/string_number_conversions.h"
33 #include "base/strings/string_split.h"
34 #include "base/strings/string_tokenizer.h"
35 #include "base/strings/stringprintf.h"
36 #include "base/system/sys_info.h"
37 #include "base/task/current_thread.h"
38 #include "base/task/sequenced_task_runner.h"
39 #include "base/task/single_thread_task_runner.h"
40 #include "base/task/thread_pool.h"
41 #include "base/threading/platform_thread.h"
42 #include "base/threading/thread_id_name_manager.h"
43 #include "base/time/time.h"
44 #include "base/trace_event/heap_profiler.h"
45 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
46 #include "base/trace_event/memory_dump_manager.h"
47 #include "base/trace_event/memory_dump_provider.h"
48 #include "base/trace_event/process_memory_dump.h"
49 #include "base/trace_event/trace_buffer.h"
50 #include "base/trace_event/trace_event.h"
51 #include "build/build_config.h"
52 
53 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
54 #include "base/numerics/safe_conversions.h"
55 #include "base/run_loop.h"
56 #include "base/task/thread_pool/thread_pool_instance.h"
57 #include "base/tracing/perfetto_platform.h"
58 #include "third_party/perfetto/include/perfetto/ext/trace_processor/export_json.h"  // nogncheck
59 #include "third_party/perfetto/include/perfetto/trace_processor/trace_processor_storage.h"  // nogncheck
60 #include "third_party/perfetto/include/perfetto/tracing/console_interceptor.h"
61 #include "third_party/perfetto/protos/perfetto/config/chrome/chrome_config.gen.h"  // nogncheck
62 #include "third_party/perfetto/protos/perfetto/config/interceptor_config.gen.h"  // nogncheck
63 #include "third_party/perfetto/protos/perfetto/trace/track_event/process_descriptor.gen.h"  // nogncheck
64 #include "third_party/perfetto/protos/perfetto/trace/track_event/thread_descriptor.gen.h"  // nogncheck
65 #endif
66 
67 #if BUILDFLAG(IS_WIN)
68 #include "base/trace_event/trace_event_etw_export_win.h"
69 #endif
70 
71 #if BUILDFLAG(IS_ANDROID)
72 #include "base/debug/elf_reader.h"
73 
74 // The linker assigns the virtual address of the start of current library to
75 // this symbol.
76 extern char __executable_start;
77 #endif
78 
79 namespace base {
80 namespace trace_event {
81 
82 namespace {
83 
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;

// Chunk count for the large (~512 MB) vector-buffer configuration.
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
// Chunk count for the default (~256 KB) vector-buffer configuration.
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
// Ring-buffer mode keeps a quarter of the default vector-buffer capacity.
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

// Size cap for an in-memory event buffer; presumably bounds each flushed
// output chunk — confirm at the use site (not visible in this chunk).
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// NOTE(review): appears to record whether TraceLog (rather than an embedder)
// bootstrapped Perfetto — confirm at the write site.
bool g_perfetto_initialized_by_tracelog = false;
#else
// Upper bound on waiting for per-thread buffers during a flush.
constexpr TimeDelta kThreadFlushTimeout = Seconds(3);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

// Set by the TraceLog constructor; allows ResetForTesting() to destroy and
// re-create the singleton in place.
TraceLog* g_trace_log_for_testing = nullptr;

// Per-thread event buffer used to batch trace events before committing them
// to the central buffer under TraceLog's lock.
ABSL_CONST_INIT thread_local TraceLog::ThreadLocalEventBuffer*
    thread_local_event_buffer = nullptr;
// When true, this thread's message loop may block, so no thread-local buffer
// is created (see InitializeThreadLocalEventBufferIfSupported()).
ABSL_CONST_INIT thread_local bool thread_blocks_message_loop = false;
// Re-entrancy guard flag for this thread's trace-event machinery.
ABSL_CONST_INIT thread_local bool thread_is_in_trace_event = false;
116 
ThreadNow()117 ThreadTicks ThreadNow() {
118   return ThreadTicks::IsSupported()
119              ? base::subtle::ThreadTicksNowIgnoringOverride()
120              : ThreadTicks();
121 }
122 
123 template <typename T>
InitializeMetadataEvent(TraceEvent * trace_event,PlatformThreadId thread_id,const char * metadata_name,const char * arg_name,const T & value)124 void InitializeMetadataEvent(TraceEvent* trace_event,
125                              PlatformThreadId thread_id,
126                              const char* metadata_name,
127                              const char* arg_name,
128                              const T& value) {
129   if (!trace_event)
130     return;
131 
132   TraceArguments args(arg_name, value);
133   base::TimeTicks now = TRACE_TIME_TICKS_NOW();
134   ThreadTicks thread_now;
135   trace_event->Reset(
136       thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
137       TraceLog::GetInstance()->GetCategoryGroupEnabled("__metadata"),
138       metadata_name,
139       trace_event_internal::kGlobalScope,  // scope
140       trace_event_internal::kNoId,         // id
141       trace_event_internal::kNoId,         // bind_id
142       &args, TRACE_EVENT_FLAG_NONE);
143 }
144 
145 // Use this function instead of TraceEventHandle constructor to keep the
146 // overhead of ScopedTracer (trace_event.h) constructor minimum.
MakeHandle(uint32_t chunk_seq,size_t chunk_index,size_t event_index,TraceEventHandle * handle)147 void MakeHandle(uint32_t chunk_seq,
148                 size_t chunk_index,
149                 size_t event_index,
150                 TraceEventHandle* handle) {
151   DCHECK(chunk_seq);
152   DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
153   DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
154   DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
155   handle->chunk_seq = chunk_seq;
156   handle->chunk_index = static_cast<uint16_t>(chunk_index);
157   handle->event_index = static_cast<uint16_t>(event_index);
158 }
159 
160 // The fallback arguments filtering function will filter away every argument.
// The fallback arguments filtering function will filter away every argument.
// It allowlists nothing: every event's arguments are stripped, regardless of
// category or name, and |arg_name_filter| is never populated.
bool DefaultIsTraceEventArgsAllowlisted(
    const char* category_group_name,
    const char* event_name,
    base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
  return false;
}
167 
168 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Bridges ConvertableToTraceFormat's proto serialization onto a Perfetto
// DebugAnnotation: buffers are collected via AddBuffer() and written out in
// one scattered-bytes append when Finalize() is called.
class PerfettoProtoAppender
    : public base::trace_event::ConvertableToTraceFormat::ProtoAppender {
 public:
  explicit PerfettoProtoAppender(
      perfetto::protos::pbzero::DebugAnnotation* proto)
      : annotation_proto_(proto) {}
  ~PerfettoProtoAppender() override = default;

  // ProtoAppender implementation

  // Records the [begin, end) byte range; the caller must keep the memory
  // alive until Finalize() has run.
  void AddBuffer(uint8_t* begin, uint8_t* end) override {
    ranges_.emplace_back();
    ranges_.back().begin = begin;
    ranges_.back().end = end;
  }

  // Appends all recorded ranges to the annotation under |field_id| and
  // returns the number of bytes written.
  size_t Finalize(uint32_t field_id) override {
    return annotation_proto_->AppendScatteredBytes(field_id, ranges_.data(),
                                                   ranges_.size());
  }

 private:
  std::vector<protozero::ContiguousMemoryRange> ranges_;
  raw_ptr<perfetto::protos::pbzero::DebugAnnotation> annotation_proto_;
};
193 
AddConvertableToTraceFormat(base::trace_event::ConvertableToTraceFormat * value,perfetto::protos::pbzero::DebugAnnotation * annotation)194 void AddConvertableToTraceFormat(
195     base::trace_event::ConvertableToTraceFormat* value,
196     perfetto::protos::pbzero::DebugAnnotation* annotation) {
197   PerfettoProtoAppender proto_appender(annotation);
198   if (value->AppendToProto(&proto_appender)) {
199     return;
200   }
201 
202   std::string json;
203   value->AppendAsTraceFormat(&json);
204   annotation->set_legacy_json_value(json.c_str());
205 }
206 
// Copies every argument of |trace_event| into |track_event| as a Perfetto
// debug annotation, dispatching on the argument's TRACE_VALUE_TYPE_* tag.
// Iteration stops at the first unnamed slot (arg_name(i) == nullptr).
void WriteDebugAnnotations(base::trace_event::TraceEvent* trace_event,
                           perfetto::protos::pbzero::TrackEvent* track_event) {
  for (size_t i = 0; i < trace_event->arg_size() && trace_event->arg_name(i);
       ++i) {
    auto type = trace_event->arg_type(i);
    auto* annotation = track_event->add_debug_annotations();

    annotation->set_name(trace_event->arg_name(i));

    // Convertable values serialize themselves (proto or JSON fallback) and
    // have no entry in the plain value union below.
    if (type == TRACE_VALUE_TYPE_CONVERTABLE) {
      AddConvertableToTraceFormat(trace_event->arg_convertible_value(i),
                                  annotation);
      continue;
    }

    // |value| is a union; |type| selects which member is valid to read.
    auto& value = trace_event->arg_value(i);
    switch (type) {
      case TRACE_VALUE_TYPE_BOOL:
        annotation->set_bool_value(value.as_bool);
        break;
      case TRACE_VALUE_TYPE_UINT:
        annotation->set_uint_value(value.as_uint);
        break;
      case TRACE_VALUE_TYPE_INT:
        annotation->set_int_value(value.as_int);
        break;
      case TRACE_VALUE_TYPE_DOUBLE:
        annotation->set_double_value(value.as_double);
        break;
      case TRACE_VALUE_TYPE_POINTER:
        // Round-trip through uintptr_t so the pointer-to-integer conversion
        // is well defined before widening to 64 bits.
        annotation->set_pointer_value(static_cast<uint64_t>(
            reinterpret_cast<uintptr_t>(value.as_pointer)));
        break;
      case TRACE_VALUE_TYPE_STRING:
      case TRACE_VALUE_TYPE_COPY_STRING:
        // Substitute a literal "NULL" for null string pointers.
        annotation->set_string_value(value.as_string ? value.as_string
                                                     : "NULL");
        break;
      case TRACE_VALUE_TYPE_PROTO: {
        auto data = value.as_proto->SerializeAsArray();
        annotation->AppendRawProtoBytes(data.data(), data.size());
      } break;

      default:
        NOTREACHED() << "Don't know how to serialize this value";
        break;
    }
  }
}
256 
// AddTraceEventOverrideFunction installed by the TraceLog constructor when
// the Perfetto client library is in use: routes a legacy TraceEvent to the
// appropriate Perfetto track (global / process / explicit thread / default),
// re-emitting its arguments and legacy id fields.
void OnAddLegacyTraceEvent(TraceEvent* trace_event,
                           bool thread_will_flush,
                           base::trace_event::TraceEventHandle* handle) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(
          trace_event->category_group_enabled()));
  // Argument writer shared by every emission path below. Captures
  // |trace_event| by value (pointer copy); runs synchronously inside the
  // PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK macros, so the pointee outlives
  // the lambda's use.
  auto write_args = [trace_event](perfetto::EventContext ctx) {
    WriteDebugAnnotations(trace_event, ctx.event());
    uint32_t id_flags = trace_event->flags() & (TRACE_EVENT_FLAG_HAS_ID |
                                                TRACE_EVENT_FLAG_HAS_LOCAL_ID |
                                                TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
    // Only events with an id, or with a phase Perfetto cannot represent
    // natively, need the legacy_event submessage.
    if (!id_flags &&
        perfetto::internal::TrackEventLegacy::PhaseToType(
            trace_event->phase()) !=
            perfetto::protos::pbzero::TrackEvent::TYPE_UNSPECIFIED) {
      return;
    }
    auto* legacy_event = ctx.event()->set_legacy_event();
    legacy_event->set_phase(trace_event->phase());
    switch (id_flags) {
      case TRACE_EVENT_FLAG_HAS_ID:
        legacy_event->set_unscoped_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
        legacy_event->set_local_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
        legacy_event->set_global_id(trace_event->id());
        break;
      default:
        break;
    }
  };

  auto phase = trace_event->phase();
  auto flags = trace_event->flags();
  base::TimeTicks timestamp = trace_event->timestamp().is_null()
                                  ? TRACE_TIME_TICKS_NOW()
                                  : trace_event->timestamp();
  if (phase == TRACE_EVENT_PHASE_COMPLETE) {
    // COMPLETE events are emitted as BEGIN here; the matching END arrives
    // via OnUpdateLegacyTraceEventDuration.
    phase = TRACE_EVENT_PHASE_BEGIN;
  } else if (phase == TRACE_EVENT_PHASE_INSTANT) {
    // Instant events with global/process scope get dedicated tracks;
    // thread-scoped ones fall through to the thread/default handling.
    auto scope = flags & TRACE_EVENT_FLAG_SCOPE_MASK;
    switch (scope) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(), ::perfetto::Track::Global(0),
            timestamp, write_args);
        return;
      case TRACE_EVENT_SCOPE_PROCESS:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(),
            ::perfetto::ProcessTrack::Current(), timestamp, write_args);
        return;
      default:
      case TRACE_EVENT_SCOPE_THREAD: /* Fallthrough. */
        break;
    }
  }
  // Events explicitly attributed to another thread go on that thread's track.
  if (trace_event->thread_id() &&
      trace_event->thread_id() != base::PlatformThread::CurrentId()) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, trace_event->name(),
        perfetto::ThreadTrack::ForThread(trace_event->thread_id()), timestamp,
        write_args);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, trace_event->name(),
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp,
      write_args);
}
329 
// UpdateDurationFunction installed alongside OnAddLegacyTraceEvent: closes a
// legacy COMPLETE event by emitting the corresponding END on the right track.
// |handle| and |thread_now| are part of the override signature but unused
// here.
void OnUpdateLegacyTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(category_group_enabled));
  auto phase = TRACE_EVENT_PHASE_END;
  // Honor a caller-supplied timestamp; otherwise sample the clock now.
  base::TimeTicks timestamp =
      explicit_timestamps ? now : TRACE_TIME_TICKS_NOW();
  // Mirror OnAddLegacyTraceEvent's track selection so BEGIN/END pair up.
  if (thread_id && thread_id != base::PlatformThread::CurrentId()) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, name, perfetto::ThreadTrack::ForThread(thread_id),
        timestamp);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, name,
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp);
}
353 #endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
354 
355 }  // namespace
356 
357 #if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
namespace {
// Perfetto provides us with a fully formed JSON trace file, while
// TraceResultBuffer wants individual JSON fragments without a containing
// object. We therefore need to strip away the outer object, including the
// metadata fields, from the JSON stream.
// Exact opening emitted by the Perfetto JSON exporter; dropped verbatim.
static constexpr char kJsonPrefix[] = "{\"traceEvents\":[\n";
// Separator between successive event fragments; stripped when leading.
static constexpr char kJsonJoiner[] = ",\n";
// Start of the trailing metadata object; everything from here is dropped.
static constexpr char kJsonSuffix[] = "],\"metadata\":";
}  // namespace
367 
// Adapts Perfetto's JSON export stream to TraceLog's chunked OutputCallback:
// strips the containing JSON object (see kJson* above), accumulates
// fragments into a RefCountedString, and flushes in ~100 KB chunks, either
// directly or via |flush_task_runner_|.
class JsonStringOutputWriter
    : public perfetto::trace_processor::json::OutputWriter {
 public:
  JsonStringOutputWriter(scoped_refptr<SequencedTaskRunner> flush_task_runner,
                         TraceLog::OutputCallback flush_callback)
      : flush_task_runner_(flush_task_runner),
        flush_callback_(std::move(flush_callback)) {
    buffer_->data().reserve(kBufferReserveCapacity);
  }

  // Destructor emits whatever remains with has_more=false so the consumer
  // knows the stream is complete.
  ~JsonStringOutputWriter() override { Flush(/*has_more=*/false); }

  // Receives the next fragment from the exporter. The very first fragment is
  // expected to be exactly kJsonPrefix and is discarded.
  perfetto::trace_processor::util::Status AppendString(
      const std::string& string) override {
    if (!did_strip_prefix_) {
      DCHECK_EQ(string, kJsonPrefix);
      did_strip_prefix_ = true;
      return perfetto::trace_processor::util::OkStatus();
    } else if (buffer_->data().empty() &&
               !strncmp(string.c_str(), kJsonJoiner, strlen(kJsonJoiner))) {
      // We only remove the leading joiner comma for the first chunk in a buffer
      // since the consumer is expected to insert commas between the buffers we
      // provide.
      buffer_->data() += string.substr(strlen(kJsonJoiner));
    } else if (!strncmp(string.c_str(), kJsonSuffix, strlen(kJsonSuffix))) {
      // The suffix starts the metadata object; drop it and everything after.
      return perfetto::trace_processor::util::OkStatus();
    } else {
      buffer_->data() += string;
    }
    if (buffer_->data().size() > kBufferLimitInBytes) {
      Flush(/*has_more=*/true);
      // Reset the buffer_ after moving it above.
      buffer_ = new RefCountedString();
      buffer_->data().reserve(kBufferReserveCapacity);
    }
    return perfetto::trace_processor::util::OkStatus();
  }

 private:
  // Hands the current buffer to the callback. Note this moves |buffer_| out;
  // AppendString replaces it immediately, and the destructor never touches
  // it again after its final Flush.
  void Flush(bool has_more) {
    if (flush_task_runner_) {
      flush_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(flush_callback_, std::move(buffer_), has_more));
    } else {
      flush_callback_.Run(std::move(buffer_), has_more);
    }
  }

  static constexpr size_t kBufferLimitInBytes = 100 * 1024;
  // Since we write each string before checking the limit, we'll always go
  // slightly over and hence we reserve some extra space to avoid most
  // reallocs.
  static constexpr size_t kBufferReserveCapacity = kBufferLimitInBytes * 5 / 4;

  scoped_refptr<SequencedTaskRunner> flush_task_runner_;
  TraceLog::OutputCallback flush_callback_;
  scoped_refptr<RefCountedString> buffer_ = new RefCountedString();
  // False until the kJsonPrefix fragment has been consumed.
  bool did_strip_prefix_ = false;
};
428 #endif  // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
429 
430 // A helper class that allows the lock to be acquired in the middle of the scope
431 // and unlocks at the end of scope if locked.
// A scoped lock that starts unlocked: the caller decides mid-scope whether
// the lock is needed (via EnsureAcquired()), and the destructor releases it
// only if it was actually taken. Calling EnsureAcquired() twice is safe.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock) {}

  OptionalAutoLock(const OptionalAutoLock&) = delete;
  OptionalAutoLock& operator=(const OptionalAutoLock&) = delete;

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  // Acquires the lock on first call; on subsequent calls merely asserts the
  // lock is still held. Annotated so thread-safety analysis treats the rest
  // of the scope as locked.
  void EnsureAcquired() EXCLUSIVE_LOCK_FUNCTION(lock_) {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    } else {
      lock_->AssertAcquired();
    }
  }

 private:
  // This field is not a raw_ptr<> because it is needed for lock annotations.
  RAW_PTR_EXCLUSION Lock* lock_;
  // Whether the destructor owes a Release().
  bool locked_ = false;
};
458 
// Per-thread staging buffer for trace events. Holds one TraceBufferChunk
// checked out from the central buffer so events can be written without taking
// TraceLog's lock on every event. Deleted (flushing its chunk) when the
// thread's message loop is destroyed, or replaced when the TraceLog
// generation changes.
class TraceLog::ThreadLocalEventBuffer
    : public CurrentThread::DestructionObserver,
      public MemoryDumpProvider {
 public:
  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
  ThreadLocalEventBuffer(const ThreadLocalEventBuffer&) = delete;
  ThreadLocalEventBuffer& operator=(const ThreadLocalEventBuffer&) = delete;
  ~ThreadLocalEventBuffer() override;

  // Returns a slot in the current chunk (checking out a fresh chunk if
  // needed) and fills |handle| so the event can be found again later.
  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  // Resolves |handle| back to its event, or nullptr if the handle refers to
  // a different (e.g. already-returned) chunk.
  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_) {
      return nullptr;
    }

    return chunk_->GetEventAt(handle.event_index);
  }

  // TraceLog generation this buffer was created under; a mismatch means the
  // buffer is stale and must be replaced.
  int generation() const { return generation_; }

 private:
  // CurrentThread::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  // Returns the current chunk to the central buffer. Caller must hold
  // trace_log_->lock_.
  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK_EQ(thread_local_event_buffer, this);
  }

  // Registers this object as the thread-local buffer for the lifetime of
  // this instance, restoring nullptr on destruction.
  const AutoReset<ThreadLocalEventBuffer*> resetter_{&thread_local_event_buffer,
                                                     this, nullptr};
  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  raw_ptr<TraceLog> trace_log_;
  std::unique_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_ = 0;
  int generation_;
};
504 
// Registers the new buffer with the current thread's message loop (for
// teardown), with memory-infra (for dump accounting), and records this
// thread's task runner in the TraceLog so flushes can be posted to it.
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  CurrentThread::Get()->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ThreadLocalEventBuffer",
      SingleThreadTaskRunner::GetCurrentDefault());

  auto thread_id = PlatformThread::CurrentId();

  AutoLock lock(trace_log->lock_);
  trace_log->thread_task_runners_[thread_id] =
      SingleThreadTaskRunner::GetCurrentDefault();
}
523 
// Unregisters from the message loop and memory-infra, then — under the
// TraceLog lock — returns any pending chunk and removes this thread's task
// runner entry.
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  CurrentThread::Get()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  AutoLock lock(trace_log_->lock_);
  FlushWhileLocked();
  trace_log_->thread_task_runners_.erase(PlatformThread::CurrentId());
}
533 
// Hands out the next free event slot in this thread's chunk. Takes the
// TraceLog lock only on the slow paths (returning a full chunk / checking
// out a new one). Returns nullptr when the central buffer has no chunks
// left; |handle| is filled only when both an event and a handle are present.
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  // Slow path 1: current chunk is exhausted — return it to the buffer.
  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  // Slow path 2: check out a fresh chunk (may fail if the buffer is full).
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
558 
// CurrentThread::DestructionObserver: the thread's message loop is going
// away, so free this buffer (the destructor flushes the pending chunk back
// to the central buffer).
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}
562 
OnMemoryDump(const MemoryDumpArgs & args,ProcessMemoryDump * pmd)563 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
564                                                     ProcessMemoryDump* pmd) {
565   if (!chunk_)
566     return true;
567   std::string dump_base_name =
568       "tracing/thread_" + NumberToString(PlatformThread::CurrentId());
569   TraceEventMemoryOverhead overhead;
570   chunk_->EstimateTraceMemoryOverhead(&overhead);
571   overhead.DumpInto(dump_base_name.c_str(), pmd);
572   return true;
573 }
574 
// Returns the checked-out chunk to the central buffer. Requires
// trace_log_->lock_ to be held by the caller.
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
587 
// Installs (or clears, with null functions) the hooks that redirect legacy
// trace-event entry points — add, flush, and duration-update — to an
// external consumer (e.g. Perfetto; see the TraceLog constructor). Stores
// are atomic so readers on other threads see each pointer consistently.
void TraceLog::SetAddTraceEventOverrides(
    const AddTraceEventOverrideFunction& add_event_override,
    const OnFlushFunction& on_flush_override,
    const UpdateDurationFunction& update_duration_override) {
  add_trace_event_override_.store(add_event_override);
  on_flush_override_.store(on_flush_override);
  update_duration_override_.store(update_duration_override);
}
596 
// Bookkeeping for an async enabled-state observer: remembers the observer
// (weakly, since it may be destroyed at any time) together with the task
// runner it registered from, so notifications run on its own sequence.
struct TraceLog::RegisteredAsyncObserver {
  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
      : observer(observer),
        task_runner(SequencedTaskRunner::GetCurrentDefault()) {}
  ~RegisteredAsyncObserver() = default;

  WeakPtr<AsyncEnabledStateObserver> observer;
  scoped_refptr<SequencedTaskRunner> task_runner;
};
606 
// Zero-initialized status snapshot; fields are filled in by the caller that
// queries the log.
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() = default;
610 
611 // static
// Returns the process-wide TraceLog singleton. NoDestructor makes it
// deliberately leaky: it is constructed on first use (generation 0) and
// never destroyed, so late-shutdown callers remain safe.
TraceLog* TraceLog::GetInstance() {
  static base::NoDestructor<TraceLog> instance(0);
  return instance.get();
}
616 
617 // static
// Test-only reset. With Perfetto, only observers are cleared and Perfetto is
// re-initialized; without it, the singleton is destroyed and reconstructed
// in place with a bumped generation so stale thread-local buffers are
// detected and discarded.
void TraceLog::ResetForTesting() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  auto* self = GetInstance();
  AutoLock lock(self->observers_lock_);
  self->enabled_state_observers_.clear();
  self->owned_enabled_state_observer_copy_.clear();
  self->async_observers_.clear();
  self->InitializePerfettoIfNeeded();
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (!g_trace_log_for_testing)
    return;
  {
    AutoLock lock(g_trace_log_for_testing->lock_);
    CategoryRegistry::ResetForTesting();
  }
  // Don't reset the generation value back to 0. TraceLog is normally
  // supposed to be a singleton and the value of generation is never
  // supposed to decrease.
  const int generation = g_trace_log_for_testing->generation() + 1;
  // Destroy and placement-new the singleton in its existing storage so the
  // address handed out by GetInstance() stays valid.
  g_trace_log_for_testing->~TraceLog();
  new (g_trace_log_for_testing) TraceLog(generation);
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
641 
// Constructs the (normally singleton) TraceLog: initializes the category
// registry, records the process id, allocates the event buffer, registers as
// a memory-dump provider, and — under Perfetto — installs the legacy-event
// override hooks. Publishes itself for ResetForTesting().
TraceLog::TraceLog(int generation)
    : process_sort_index_(0),
      process_id_hash_(0),
      process_id_(base::kNullProcessId),
      trace_options_(kInternalRecordUntilFull),
      trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(generation),
      use_worker_thread_(false) {
  CategoryRegistry::Initialize();

#if BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(GetCurrentProcId());
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
                                                         nullptr);
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TrackEvent::AddSessionObserver(this);
  // When using the Perfetto client library, TRACE_EVENT macros will bypass
  // TraceLog entirely. However, trace event embedders which haven't been ported
  // to Perfetto yet will still be using TRACE_EVENT_API_ADD_TRACE_EVENT, so we
  // need to route these events to Perfetto using an override here. This
  // override is also used to capture internal metadata events.
  SetAddTraceEventOverrides(&OnAddLegacyTraceEvent, nullptr,
                            &OnUpdateLegacyTraceEventDuration);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  g_trace_log_for_testing = this;
}
675 
// Normally never runs (the singleton is leaky); reached only via
// ResetForTesting()'s explicit destruction. Only the Perfetto session
// observer needs unhooking.
TraceLog::~TraceLog() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TrackEvent::RemoveSessionObserver(this);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
681 
// Lazily creates (or refreshes, on generation change) this thread's
// ThreadLocalEventBuffer when the thread can support one.
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop with a task runner
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop or if the message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop || !CurrentThread::IsSet() ||
      !SingleThreadTaskRunner::HasCurrentDefault()) {
    return;
  }
  HEAP_PROFILER_SCOPED_IGNORE;
  // A stale buffer (from before a ResetForTesting/generation bump) is
  // deleted, which flushes its chunk and clears the thread-local pointer.
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
  }
}
701 
// MemoryDumpProvider: accounts for the TraceLog object itself, the central
// event buffer, and buffered metadata events under "tracing/main_trace_log".
bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
  {
    // Lock scope kept tight: only the estimation reads shared state.
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);

    for (auto& metadata_event : metadata_events_)
      metadata_event->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
720 
// Returns the stable enabled-state byte pointer for |category_group|,
// creating the category on first use. With Perfetto, this delegates to the
// Perfetto-backed macro implementation.
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group);
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    // Singleton already torn down: hand back the permanently-disabled
    // sentinel category's state.
    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
  }
  // Fast path: lock-free lookup of an existing category.
  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
  if (!category) {
    // Slow path: in the case of a new category we have to repeat the check
    // holding the lock, as multiple threads might have reached this point
    // at the same time.
    auto category_initializer = [](TraceCategory* category) {
      TraceLog::GetInstance()->UpdateCategoryState(category);
    };
    AutoLock lock(tracelog->lock_);
    CategoryRegistry::GetOrCreateCategoryLocked(
        category_group, category_initializer, &category);
  }
  DCHECK(category->state_ptr());
  return category->state_ptr();
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
747 
GetCategoryGroupName(const unsigned char * category_group_enabled)748 const char* TraceLog::GetCategoryGroupName(
749     const unsigned char* category_group_enabled) {
750 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
751   return TRACE_EVENT_API_GET_CATEGORY_GROUP_NAME(category_group_enabled);
752 #else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
753   return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
754       ->name();
755 #endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
756 }
757 
UpdateCategoryState(TraceCategory * category)758 void TraceLog::UpdateCategoryState(TraceCategory* category) {
759   lock_.AssertAcquired();
760   DCHECK(category->is_valid());
761   unsigned char state_flags = 0;
762   if (enabled_ && trace_config_.IsCategoryGroupEnabled(category->name())) {
763     state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
764   }
765 
766   // TODO(primiano): this is a temporary workaround for catapult:#2341,
767   // to guarantee that metadata events are always added even if the category
768   // filter is "-*". See crbug.com/618054 for more details and long-term fix.
769   if (enabled_ && category == CategoryRegistry::kCategoryMetadata) {
770     state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
771   }
772 
773 #if BUILDFLAG(IS_WIN)
774   if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
775           category->name())) {
776     state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
777   }
778 #endif
779 
780   category->set_state(state_flags);
781 }
782 
UpdateCategoryRegistry()783 void TraceLog::UpdateCategoryRegistry() {
784   lock_.AssertAcquired();
785   for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
786     UpdateCategoryState(&category);
787   }
788 }
789 
// Starts (or extends) recording with |trace_config|. |modes_to_enable| must
// be RECORDING_MODE. In the Perfetto build this translates the legacy config
// into a perfetto::TraceConfig and starts a session; in the legacy build it
// swaps/merges the config, picks a trace buffer, and notifies observers.
void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          uint8_t modes_to_enable) {
  // FILTERING_MODE is no longer supported.
  DCHECK(modes_to_enable == RECORDING_MODE);
  DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));

  AutoLock lock(lock_);

  // Perfetto only supports basic wildcard filtering, so check that we're not
  // trying to use more complex filters: no "?" anywhere, and "*" only as the
  // final character of a pattern.
  for (const auto& excluded :
       trace_config.category_filter().excluded_categories()) {
    DCHECK(excluded.find("?") == std::string::npos);
    DCHECK(excluded.find("*") == std::string::npos ||
           excluded.find("*") == excluded.size() - 1);
  }
  for (const auto& included :
       trace_config.category_filter().included_categories()) {
    DCHECK(included.find("?") == std::string::npos);
    DCHECK(included.find("*") == std::string::npos ||
           included.find("*") == included.size() - 1);
  }
  for (const auto& disabled :
       trace_config.category_filter().disabled_categories()) {
    DCHECK(disabled.find("?") == std::string::npos);
    DCHECK(disabled.find("*") == std::string::npos ||
           disabled.find("*") == disabled.size() - 1);
  }

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  DCHECK(!trace_config.IsArgumentFilterEnabled());

  // TODO(khokhlov): Avoid duplication between this code and
  // services/tracing/public/cpp/perfetto/perfetto_config.cc.
  perfetto::TraceConfig perfetto_config;
  // Default to a 200 MB buffer when the config does not specify a size.
  size_t size_limit = trace_config.GetTraceBufferSizeInKb();
  if (size_limit == 0)
    size_limit = 200 * 1024;
  auto* buffer_config = perfetto_config.add_buffers();
  buffer_config->set_size_kb(checked_cast<uint32_t>(size_limit));
  // Map the legacy record mode onto Perfetto's buffer fill policy.
  switch (trace_config.GetTraceRecordMode()) {
    case base::trace_event::RECORD_UNTIL_FULL:
    case base::trace_event::RECORD_AS_MUCH_AS_POSSIBLE:
      buffer_config->set_fill_policy(
          perfetto::TraceConfig::BufferConfig::DISCARD);
      break;
    case base::trace_event::RECORD_CONTINUOUSLY:
      buffer_config->set_fill_policy(
          perfetto::TraceConfig::BufferConfig::RING_BUFFER);
      break;
    case base::trace_event::ECHO_TO_CONSOLE:
      // Handled below via the console interceptor.
      break;
  }

  // Add the track event data source.
  // TODO(skyostil): Configure kTraceClockId as the primary trace clock.
  auto* data_source = perfetto_config.add_data_sources();
  auto* source_config = data_source->mutable_config();
  source_config->set_name("track_event");
  source_config->set_target_buffer(0);
  auto* source_chrome_config = source_config->mutable_chrome_config();
  source_chrome_config->set_trace_config(trace_config.ToString());
  source_chrome_config->set_convert_to_legacy_json(true);

  if (trace_config.GetTraceRecordMode() == base::trace_event::ECHO_TO_CONSOLE) {
    perfetto::ConsoleInterceptor::Register();
    source_config->mutable_interceptor_config()->set_name("console");
  }

  source_config->set_track_event_config_raw(
      trace_config.ToPerfettoTrackEventConfigRaw(
          /*privacy_filtering_enabled = */ false));

  // Memory-infra dumps need their own data source when requested.
  if (trace_config.IsCategoryGroupEnabled("disabled-by-default-memory-infra")) {
    data_source = perfetto_config.add_data_sources();
    source_config = data_source->mutable_config();
    source_config->set_name("org.chromium.memory_instrumentation");
    source_config->set_target_buffer(0);
    source_chrome_config = source_config->mutable_chrome_config();
    source_chrome_config->set_trace_config(trace_config.ToString());
    source_chrome_config->set_convert_to_legacy_json(true);
  }

  // Clear incremental state every 0.5 seconds, so that we lose at most the
  // first 0.5 seconds of the trace (if we wrap around Perfetto's central
  // buffer).
  // This value strikes balance between minimizing interned data overhead, and
  // reducing the risk of data loss in ring buffer mode.
  perfetto_config.mutable_incremental_state_config()->set_clear_period_ms(500);

  SetEnabledImpl(trace_config, perfetto_config);
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Can't enable tracing when Flush() is in progress.
  DCHECK(!flush_task_runner_);

  InternalTraceOptions new_options =
      GetInternalOptionsFromTraceConfig(trace_config);

  InternalTraceOptions old_options = trace_options();

  if (dispatching_to_observers_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  // Update trace config for recording. Merging keeps category settings from
  // both configs when tracing was already started.
  const bool already_recording = enabled_;
  if (already_recording) {
    trace_config_.Merge(trace_config);
  } else {
    trace_config_ = trace_config;
  }

  enabled_ = true;
  UpdateCategoryRegistry();

  // Do not notify observers or create trace buffer if only enabled for
  // filtering or if recording was already enabled.
  if (already_recording)
    return;

  // Discard events if new trace options are different. Reducing trace buffer
  // size is not supported while already recording, so only replace trace
  // buffer if we were not already recording.
  if (new_options != old_options ||
      (trace_config_.GetTraceBufferSizeInEvents() && !already_recording)) {
    trace_options_.store(new_options, std::memory_order_relaxed);
    UseNextTraceBuffer();
  }

  num_traces_recorded_++;

  UpdateCategoryRegistry();

  dispatching_to_observers_ = true;
  {
    // Notify observers outside of the thread events lock, so they can trigger
    // trace events.
    AutoUnlock unlock(lock_);
    AutoLock lock2(observers_lock_);
    for (EnabledStateObserver* observer : enabled_state_observers_)
      observer->OnTraceLogEnabled();
    for (const auto& it : async_observers_) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                              it.second.observer));
    }
  }
  dispatching_to_observers_ = false;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
944 
945 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
GetCurrentTrackEventDataSourceConfig() const946 perfetto::DataSourceConfig TraceLog::GetCurrentTrackEventDataSourceConfig()
947     const {
948   AutoLock lock(track_event_lock_);
949   if (track_event_sessions_.empty()) {
950     return perfetto::DataSourceConfig();
951   }
952   return track_event_sessions_[0].config;
953 }
954 
InitializePerfettoIfNeeded()955 void TraceLog::InitializePerfettoIfNeeded() {
956   // When we're using the Perfetto client library, only tests should be
957   // recording traces directly through TraceLog. Production code should instead
958   // use perfetto::Tracing::NewTrace(). Let's make sure the tracing service
959   // didn't already initialize Perfetto in this process, because it's not safe
960   // to consume trace data from arbitrary processes through TraceLog as the JSON
961   // conversion here isn't sandboxed like with the real tracing service.
962   //
963   // Note that initializing Perfetto here requires the thread pool to be ready.
964   CHECK(!perfetto::Tracing::IsInitialized() ||
965         g_perfetto_initialized_by_tracelog)
966       << "Don't use TraceLog for recording traces from non-test code. Use "
967          "perfetto::Tracing::NewTrace() instead.";
968 
969   if (perfetto::Tracing::IsInitialized())
970     return;
971   g_perfetto_initialized_by_tracelog = true;
972   perfetto::TracingInitArgs init_args;
973   init_args.backends = perfetto::BackendType::kInProcessBackend;
974   init_args.shmem_batch_commits_duration_ms = 1000;
975   init_args.shmem_size_hint_kb = 4 * 1024;
976   init_args.shmem_direct_patching_enabled = true;
977   init_args.disallow_merging_with_system_tracks = true;
978   perfetto::Tracing::Initialize(init_args);
979   TrackEvent::Register();
980 }
981 
IsPerfettoInitializedByTraceLog() const982 bool TraceLog::IsPerfettoInitializedByTraceLog() const {
983   return g_perfetto_initialized_by_tracelog;
984 }
985 
SetEnabled(const TraceConfig & trace_config,const perfetto::TraceConfig & perfetto_config)986 void TraceLog::SetEnabled(const TraceConfig& trace_config,
987                           const perfetto::TraceConfig& perfetto_config) {
988   AutoLock lock(lock_);
989   SetEnabledImpl(trace_config, perfetto_config);
990 }
991 
// Starts a new Perfetto tracing session with |perfetto_config|, recording the
// configs for later queries. Caller must hold |lock_|; the lock is released
// before the session is started.
void TraceLog::SetEnabledImpl(const TraceConfig& trace_config,
                              const perfetto::TraceConfig& perfetto_config) {
  DCHECK(!TrackEvent::IsEnabled());
  lock_.AssertAcquired();
  InitializePerfettoIfNeeded();
  trace_config_ = trace_config;
  perfetto_config_ = perfetto_config;
  tracing_session_ = perfetto::Tracing::NewTrace();

  // Drop |lock_| for Setup()/StartBlocking(). NOTE(review): presumably so
  // that tracing code re-entered while starting the session can take the
  // lock without deadlocking — confirm.
  AutoUnlock unlock(lock_);
  tracing_session_->Setup(perfetto_config);
  tracing_session_->StartBlocking();
}
1005 #endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
1006 
SetArgumentFilterPredicate(const ArgumentFilterPredicate & argument_filter_predicate)1007 void TraceLog::SetArgumentFilterPredicate(
1008     const ArgumentFilterPredicate& argument_filter_predicate) {
1009   AutoLock lock(lock_);
1010   DCHECK(!argument_filter_predicate.is_null());
1011   // Replace the existing argument filter.
1012   argument_filter_predicate_ = argument_filter_predicate;
1013 }
1014 
GetArgumentFilterPredicate() const1015 ArgumentFilterPredicate TraceLog::GetArgumentFilterPredicate() const {
1016   AutoLock lock(lock_);
1017   return argument_filter_predicate_;
1018 }
1019 
// Installs the predicate that decides which metadata may appear in filtered
// output. Must not be null.
void TraceLog::SetMetadataFilterPredicate(
    const MetadataFilterPredicate& metadata_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!metadata_filter_predicate.is_null());
  // Replace the existing metadata filter.
  metadata_filter_predicate_ = metadata_filter_predicate;
}
1027 
GetMetadataFilterPredicate() const1028 MetadataFilterPredicate TraceLog::GetMetadataFilterPredicate() const {
1029   AutoLock lock(lock_);
1030   return metadata_filter_predicate_;
1031 }
1032 
// Sets whether the host app's package name is recorded in traces.
// NOTE(review): no locking here; presumably configured during startup before
// concurrent readers exist — confirm against callers.
void TraceLog::SetRecordHostAppPackageName(bool record_host_app_package_name) {
  record_host_app_package_name_ = record_host_app_package_name;
}
1036 
// Returns the flag set via SetRecordHostAppPackageName() (unsynchronized
// read; see the note on the setter).
bool TraceLog::ShouldRecordHostAppPackageName() const {
  return record_host_app_package_name_;
}
1040 
GetInternalOptionsFromTraceConfig(const TraceConfig & config)1041 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
1042     const TraceConfig& config) {
1043   InternalTraceOptions ret = config.IsArgumentFilterEnabled()
1044                                  ? kInternalEnableArgumentFilter
1045                                  : kInternalNone;
1046   switch (config.GetTraceRecordMode()) {
1047     case RECORD_UNTIL_FULL:
1048       return ret | kInternalRecordUntilFull;
1049     case RECORD_CONTINUOUSLY:
1050       return ret | kInternalRecordContinuously;
1051     case ECHO_TO_CONSOLE:
1052       return ret | kInternalEchoToConsole;
1053     case RECORD_AS_MUCH_AS_POSSIBLE:
1054       return ret | kInternalRecordAsMuchAsPossible;
1055   }
1056   NOTREACHED();
1057   return kInternalNone;
1058 }
1059 
// Returns the config tracing is currently running with. In the Perfetto
// build this is reconstructed from the active track-event data source's
// chrome_config (empty when no session is active).
TraceConfig TraceLog::GetCurrentTraceConfig() const {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  const auto chrome_config =
      GetCurrentTrackEventDataSourceConfig().chrome_config();
  return TraceConfig(chrome_config.trace_config());
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  AutoLock lock(lock_);
  return trace_config_;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
1070 
SetDisabled()1071 void TraceLog::SetDisabled() {
1072   AutoLock lock(lock_);
1073   SetDisabledWhileLocked(RECORDING_MODE);
1074 }
1075 
SetDisabled(uint8_t modes_to_disable)1076 void TraceLog::SetDisabled(uint8_t modes_to_disable) {
1077   AutoLock lock(lock_);
1078   SetDisabledWhileLocked(modes_to_disable);
1079 }
1080 
// Stops tracing. Caller must hold |lock_|; only RECORDING_MODE may be
// disabled. In the Perfetto build this stops the tracing session; in the
// legacy build it clears the config, emits metadata events, and notifies
// observers.
void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
  DCHECK(modes_to_disable == RECORDING_MODE);
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Nothing to stop if no session was ever started.
  if (!tracing_session_)
    return;

  AddMetadataEventsWhileLocked();
  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  TrackEvent::Flush();
  // If the current thread has an active task runner, allow nested tasks to run
  // while stopping the session. This is needed by some tests, e.g., to allow
  // data sources to properly flush themselves.
  if (SingleThreadTaskRunner::HasCurrentDefault()) {
    RunLoop stop_loop(RunLoop::Type::kNestableTasksAllowed);
    auto quit_closure = stop_loop.QuitClosure();
    tracing_session_->SetOnStopCallback(
        [&quit_closure] { quit_closure.Run(); });
    tracing_session_->Stop();
    // Release |lock_| while pumping the loop so tasks that need it can run.
    AutoUnlock unlock(lock_);
    stop_loop.Run();
  } else {
    tracing_session_->StopBlocking();
  }
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

  if (dispatching_to_observers_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  enabled_ = false;
  trace_config_.Clear();
  UpdateCategoryRegistry();

  AddMetadataEventsWhileLocked();

  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  dispatching_to_observers_ = true;
  {
    // Release trace events lock, so observers can trigger trace events.
    AutoUnlock unlock(lock_);
    AutoLock lock2(observers_lock_);
    for (base::trace_event::TraceLog::EnabledStateObserver* it :
         enabled_state_observers_) {
      it->OnTraceLogDisabled();
    }
    for (const auto& it : async_observers_) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                              it.second.observer));
    }
  }
  dispatching_to_observers_ = false;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
1142 
GetNumTracesRecorded()1143 int TraceLog::GetNumTracesRecorded() {
1144   AutoLock lock(lock_);
1145   return enabled_ ? num_traces_recorded_ : -1;
1146 }
1147 
AddEnabledStateObserver(EnabledStateObserver * listener)1148 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1149   AutoLock lock(observers_lock_);
1150   enabled_state_observers_.push_back(listener);
1151 }
1152 
RemoveEnabledStateObserver(EnabledStateObserver * listener)1153 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1154   AutoLock lock(observers_lock_);
1155   enabled_state_observers_.erase(
1156       ranges::remove(enabled_state_observers_, listener),
1157       enabled_state_observers_.end());
1158 }
1159 
AddOwnedEnabledStateObserver(std::unique_ptr<EnabledStateObserver> listener)1160 void TraceLog::AddOwnedEnabledStateObserver(
1161     std::unique_ptr<EnabledStateObserver> listener) {
1162   AutoLock lock(observers_lock_);
1163   enabled_state_observers_.push_back(listener.get());
1164   owned_enabled_state_observer_copy_.push_back(std::move(listener));
1165 }
1166 
HasEnabledStateObserver(EnabledStateObserver * listener) const1167 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
1168   AutoLock lock(observers_lock_);
1169   return Contains(enabled_state_observers_, listener);
1170 }
1171 
AddAsyncEnabledStateObserver(WeakPtr<AsyncEnabledStateObserver> listener)1172 void TraceLog::AddAsyncEnabledStateObserver(
1173     WeakPtr<AsyncEnabledStateObserver> listener) {
1174   AutoLock lock(observers_lock_);
1175   async_observers_.emplace(listener.get(), RegisteredAsyncObserver(listener));
1176 }
1177 
RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver * listener)1178 void TraceLog::RemoveAsyncEnabledStateObserver(
1179     AsyncEnabledStateObserver* listener) {
1180   AutoLock lock(observers_lock_);
1181   async_observers_.erase(listener);
1182 }
1183 
HasAsyncEnabledStateObserver(AsyncEnabledStateObserver * listener) const1184 bool TraceLog::HasAsyncEnabledStateObserver(
1185     AsyncEnabledStateObserver* listener) const {
1186   AutoLock lock(observers_lock_);
1187   return Contains(async_observers_, listener);
1188 }
1189 
AddIncrementalStateObserver(IncrementalStateObserver * listener)1190 void TraceLog::AddIncrementalStateObserver(IncrementalStateObserver* listener) {
1191   AutoLock lock(observers_lock_);
1192   incremental_state_observers_.push_back(listener);
1193 }
1194 
RemoveIncrementalStateObserver(IncrementalStateObserver * listener)1195 void TraceLog::RemoveIncrementalStateObserver(
1196     IncrementalStateObserver* listener) {
1197   AutoLock lock(observers_lock_);
1198   incremental_state_observers_.erase(
1199       ranges::remove(incremental_state_observers_, listener),
1200       incremental_state_observers_.end());
1201 }
1202 
OnIncrementalStateCleared()1203 void TraceLog::OnIncrementalStateCleared() {
1204   AutoLock lock(observers_lock_);
1205   for (IncrementalStateObserver* observer : incremental_state_observers_)
1206     observer->OnIncrementalStateCleared();
1207 }
1208 
GetStatus() const1209 TraceLogStatus TraceLog::GetStatus() const {
1210   AutoLock lock(lock_);
1211   TraceLogStatus result;
1212   result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
1213   result.event_count = static_cast<uint32_t>(logged_events_->Size());
1214   return result;
1215 }
1216 
1217 #if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Returns true once the main event buffer can accept no more chunks.
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
1222 #endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
1223 
// Allocates a slot for a new event in the shared chunk, returning nullptr
// when the buffer cannot supply another chunk. Caller must hold |lock_|.
// If |handle| is non-null, it is filled so the caller can locate the event
// again later. |check_buffer_is_full| controls whether acquiring a new chunk
// may trigger CheckIfBufferIsFullWhileLocked() (which can disable tracing).
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  // Retire the current chunk once it is full.
  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                std::move(thread_shared_chunk_));
  }

  // Acquire a fresh chunk if we have none (first use or just retired).
  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  // GetChunk() can fail when the buffer is exhausted.
  if (!thread_shared_chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
1249 
CheckIfBufferIsFullWhileLocked()1250 void TraceLog::CheckIfBufferIsFullWhileLocked() {
1251   if (logged_events_->IsFull()) {
1252     if (buffer_limit_reached_timestamp_.is_null()) {
1253       buffer_limit_reached_timestamp_ = OffsetNow();
1254     }
1255     SetDisabledWhileLocked(RECORDING_MODE);
1256   }
1257 }
1258 
// Flush() works as follows:
// 1. Flush() is called on thread A, whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread-local buffers; otherwise finish the
//    flush;
// 3. FlushCurrentThread() deletes the thread-local event buffer:
//    - The last batch of events of the thread is flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
// Flushes buffered events to |cb| without discarding them; see the flush
// protocol description above.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
1273 
// Stops tracing and flushes with discard_events=true: buffered events are
// dropped, but |cb| is still invoked (with an empty result) so the caller
// learns the cancellation completed.
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
1278 
// Shared implementation of Flush()/CancelTracing(). |discard_events| drops
// the buffered events instead of serializing them. The three build variants
// differ: trace-processor builds read the Perfetto session back (optionally
// converting to JSON), plain Perfetto builds cannot flush through TraceLog at
// all, and legacy builds run the multi-thread flush protocol described above.
void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;

#if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
  TrackEvent::Flush();

  // Without a session (or when cancelling), report a single empty chunk.
  if (!tracing_session_ || discard_events) {
    tracing_session_.reset();
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    cb.Run(empty_result, /*has_more_events=*/false);
    return;
  }

  // The session's chrome_config decides whether output is legacy JSON (via
  // trace processor) or raw proto; JSON is the default.
  bool convert_to_json = true;
  for (const auto& data_source : perfetto_config_.data_sources()) {
    if (data_source.config().has_chrome_config() &&
        data_source.config().chrome_config().has_convert_to_legacy_json()) {
      convert_to_json =
          data_source.config().chrome_config().convert_to_legacy_json();
      break;
    }
  }

  if (convert_to_json) {
    perfetto::trace_processor::Config processor_config;
    trace_processor_ =
        perfetto::trace_processor::TraceProcessorStorage::CreateInstance(
            processor_config);
    json_output_writer_.reset(new JsonStringOutputWriter(
        use_worker_thread ? SingleThreadTaskRunner::GetCurrentDefault()
                          : nullptr,
        cb));
  } else {
    proto_output_callback_ = std::move(cb);
  }

  // Read the trace back; asynchronously when a worker thread is allowed.
  if (use_worker_thread) {
    tracing_session_->ReadTrace(
        [this](perfetto::TracingSession::ReadTraceCallbackArgs args) {
          OnTraceData(args.data, args.size, args.has_more);
        });
  } else {
    auto data = tracing_session_->ReadTraceBlocking();
    OnTraceData(data.data(), data.size(), /*has_more=*/false);
  }
#elif BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Trace processor isn't enabled so we can't convert the resulting trace into
  // JSON.
  CHECK(false) << "JSON tracing isn't supported";
#else
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int gen = generation();
  // Copy of thread_task_runners_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = SequencedTaskRunner::HasCurrentDefault()
                             ? SequencedTaskRunner::GetCurrentDefault()
                             : nullptr;
    DCHECK(thread_task_runners_.empty() || flush_task_runner_);
    flush_output_callback_ = cb;

    // Return the partially-filled shared chunk to the main buffer so its
    // events are included in this flush.
    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  std::move(thread_shared_chunk_));
    }

    for (const auto& it : thread_task_runners_)
      task_runners.push_back(it.second);
  }

  // Ask every thread owning a local buffer to flush, and arm a timeout in
  // case any of them fails to respond (see OnFlushTimeout).
  if (!task_runners.empty()) {
    for (auto& task_runner : task_runners) {
      task_runner->PostTask(
          FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
                              gen, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE,
        BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                 discard_events),
        kThreadFlushTimeout);
    return;
  }

  FinishFlush(gen, discard_events);
#endif  // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
}
1381 
1382 #if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
// Receives chunks of the serialized trace read back from the Perfetto
// session. In proto mode each chunk is forwarded to the proto callback; in
// JSON mode chunks are fed to the trace processor and JSON is exported once
// the final chunk (|has_more| == false) arrives.
void TraceLog::OnTraceData(const char* data, size_t size, bool has_more) {
  if (proto_output_callback_) {
    scoped_refptr<RefCountedString> chunk = new RefCountedString();
    if (size)
      chunk->data().assign(data, size);
    proto_output_callback_.Run(std::move(chunk), has_more);
    if (!has_more) {
      // Last chunk: release the callback and the session.
      proto_output_callback_.Reset();
      tracing_session_.reset();
    }
    return;
  }
  if (size) {
    // Parse() takes ownership of a heap buffer, so copy the chunk into one.
    std::unique_ptr<uint8_t[]> data_copy(new uint8_t[size]);
    memcpy(&data_copy[0], data, size);
    auto status = trace_processor_->Parse(std::move(data_copy), size);
    DCHECK(status.ok()) << status.message();
  }
  if (has_more)
    return;
  trace_processor_->NotifyEndOfFile();

  // All chunks parsed; convert the accumulated trace to JSON and clean up.
  auto status = perfetto::trace_processor::json::ExportJson(
      trace_processor_.get(), json_output_writer_.get());
  DCHECK(status.ok()) << status.message();
  trace_processor_.reset();
  tracing_session_.reset();
  json_output_writer_.reset();
}
1412 #endif  // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
1413 
// Usually it runs on a different thread.
// Serializes |logged_events| as comma-separated JSON event records and
// streams the result to |flush_output_callback| in chunks of roughly
// kTraceEventBufferSizeInBytes, applying |argument_filter_predicate| to each
// event's arguments.
void TraceLog::ConvertTraceEventsToTraceFormat(
    std::unique_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  HEAP_PROFILER_SCOPED_IGNORE;
  // The callback needs to be called at least once, even if there are no
  // events, to let the caller know that the flush has completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  // Reserve slightly above the flush threshold to avoid reallocations.
  const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
  json_events_str_ptr->data().reserve(kReserveCapacity);
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        // Emit the accumulated output (has_more_events=true), start fresh.
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
        json_events_str_ptr->data().reserve(kReserveCapacity);
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  // Final (possibly empty) chunk with has_more_events=false.
  flush_output_callback.Run(json_events_str_ptr, false);
}
1444 
// Completes a flush started by FlushInternal(): swaps out the event buffer
// under the lock, then converts it to the output format — on a worker thread
// when one was requested. |discard_events| drops the events but still
// notifies the callback once.
void TraceLog::FinishFlush(int generation, bool discard_events) {
  std::unique_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  // A stale generation means a newer flush has superseded this one.
  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    // Detach the filled buffer and install a fresh one for future events.
    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_task_runners_.clear();

    flush_task_runner_ = nullptr;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      // If argument filtering is activated and there is no filtering predicate,
      // use the safe default filtering predicate.
      if (argument_filter_predicate_.is_null()) {
        argument_filter_predicate =
            base::BindRepeating(&DefaultIsTraceEventArgsAllowlisted);
      } else {
        argument_filter_predicate = argument_filter_predicate_;
      }
    }
  }

  if (discard_events) {
    // Cancelled flush: drop the events but still notify the callback once.
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_) {
    base::ThreadPool::PostTask(
        FROM_HERE,
        {MayBlock(), TaskPriority::BEST_EFFORT,
         TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
        BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
                 std::move(previous_logged_events), flush_output_callback,
                 argument_filter_predicate));
    return;
  }

  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
1499 
1500 // Run in each thread holding a local event buffer.
// Flushes the calling thread's local event buffer as part of a global
// flush. The last thread to finish (when |thread_task_runners_| becomes
// empty) posts FinishFlush to the flush task runner.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer;

  // Invoke the flush override, if one was installed.
  auto on_flush_override = on_flush_override_.load(std::memory_order_relaxed);
  if (on_flush_override) {
    on_flush_override();
  }

  // Scheduler uses TRACE_EVENT macros when posting a task, which can lead
  // to acquiring a tracing lock. Given that posting a task requires grabbing
  // a scheduler lock, we need to post this task outside tracing lock to avoid
  // deadlocks.
  scoped_refptr<SequencedTaskRunner> cached_flush_task_runner;
  {
    AutoLock lock(lock_);
    cached_flush_task_runner = flush_task_runner_;
    // Only proceed when every thread has flushed its buffer; otherwise a
    // later FlushCurrentThread call will post FinishFlush.
    if (!CheckGeneration(generation) || !flush_task_runner_ ||
        !thread_task_runners_.empty())
      return;
  }
  cached_flush_task_runner->PostTask(
      FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
                          discard_events));
}
1534 
// Invoked when a flush did not complete within its deadline. Logs the
// threads that have not drained their local buffers yet, then forces the
// flush to finish so the caller still receives output.
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished flush in time. "
           "If this happens stably for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to avoid its trace events from being lost.";
    // |thread_task_runners_| still contains one entry per thread that has
    // not yet run FlushCurrentThread.
    for (const auto& it : thread_task_runners_) {
      LOG(WARNING) << "Thread: "
                   << ThreadIdNameManager::GetInstance()->GetName(it.first);
    }
  }
  FinishFlush(generation, discard_events);
}
1555 
// Installs a fresh trace buffer and bumps the generation counter so that
// in-flight callbacks referring to the previous buffer can detect that
// they are stale (see CheckGeneration). Also drops the thread-shared
// chunk, which belonged to the old buffer.
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  generation_.fetch_add(1, std::memory_order_relaxed);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
1562 
// Performs per-event bookkeeping (thread-name tracking and, on Windows,
// ETW export) and returns whether the event should actually be recorded.
// Returns false when the category is disabled or when this thread is
// already inside trace-event code (re-entrancy guard).
bool TraceLog::ShouldAddAfterUpdatingState(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    uint64_t id,
    PlatformThreadId thread_id,
    const TimeTicks timestamp,
    TraceArguments* args) {
  if (!*category_group_enabled)
    return false;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event) {
    return false;
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == PlatformThread::CurrentId()) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetNameForCurrentThread();
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    thread_local const char* current_thread_name = nullptr;
    if (new_name != current_thread_name && new_name && *new_name) {
      current_thread_name = new_name;

      AutoLock thread_info_lock(thread_info_lock_);

      auto existing_name = thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name. Keep a comma-separated list of all names seen for the id.
        std::vector<std::string_view> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        if (!Contains(existing_names, new_name)) {
          if (!existing_names.empty()) {
            existing_name->second.push_back(',');
          }
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if BUILDFLAG(IS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT) {
    // ETW export expects non-null event names.
    name = name ? name : "";
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  timestamp, args);
  }
#endif  // BUILDFLAG(IS_WIN)
  return true;
}
1628 
AddTraceEvent(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,TraceArguments * args,unsigned int flags)1629 TraceEventHandle TraceLog::AddTraceEvent(
1630     char phase,
1631     const unsigned char* category_group_enabled,
1632     const char* name,
1633     const char* scope,
1634     uint64_t id,
1635     TraceArguments* args,
1636     unsigned int flags) {
1637   auto thread_id = base::PlatformThread::CurrentId();
1638   base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1639   return AddTraceEventWithThreadIdAndTimestamp(
1640       phase, category_group_enabled, name, scope, id,
1641       trace_event_internal::kNoId,  // bind_id
1642       thread_id, now, args, flags);
1643 }
1644 
AddTraceEventWithBindId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,TraceArguments * args,unsigned int flags)1645 TraceEventHandle TraceLog::AddTraceEventWithBindId(
1646     char phase,
1647     const unsigned char* category_group_enabled,
1648     const char* name,
1649     const char* scope,
1650     uint64_t id,
1651     uint64_t bind_id,
1652     TraceArguments* args,
1653     unsigned int flags) {
1654   auto thread_id = base::PlatformThread::CurrentId();
1655   base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1656   return AddTraceEventWithThreadIdAndTimestamp(
1657       phase, category_group_enabled, name, scope, id, bind_id, thread_id, now,
1658       args, flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
1659 }
1660 
AddTraceEventWithProcessId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,ProcessId process_id,TraceArguments * args,unsigned int flags)1661 TraceEventHandle TraceLog::AddTraceEventWithProcessId(
1662     char phase,
1663     const unsigned char* category_group_enabled,
1664     const char* name,
1665     const char* scope,
1666     uint64_t id,
1667     ProcessId process_id,
1668     TraceArguments* args,
1669     unsigned int flags) {
1670   base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1671   return AddTraceEventWithThreadIdAndTimestamp(
1672       phase, category_group_enabled, name, scope, id,
1673       trace_event_internal::kNoId,  // bind_id
1674       static_cast<PlatformThreadId>(process_id), now, args,
1675       flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
1676 }
1677 
1678 // Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
1679 // with kNoId as bind_id
// Legacy overload without a bind id: forwards to the full overload with
// kNoId as the bind id.
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    TraceArguments* args,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, scope, id,
      trace_event_internal::kNoId,  // bind_id
      thread_id, timestamp, args, flags);
}
1695 
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,PlatformThreadId thread_id,const TimeTicks & timestamp,TraceArguments * args,unsigned int flags)1696 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1697     char phase,
1698     const unsigned char* category_group_enabled,
1699     const char* name,
1700     const char* scope,
1701     uint64_t id,
1702     uint64_t bind_id,
1703     PlatformThreadId thread_id,
1704     const TimeTicks& timestamp,
1705     TraceArguments* args,
1706     unsigned int flags) {
1707   ThreadTicks thread_now;
1708   // If timestamp is provided explicitly, don't record thread time as it would
1709   // be for the wrong timestamp. Similarly, if we record an event for another
1710   // process or thread, we shouldn't report the current thread's thread time.
1711   if (!(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ||
1712         flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID ||
1713         thread_id != PlatformThread::CurrentId())) {
1714     thread_now = ThreadNow();
1715   }
1716   return AddTraceEventWithThreadIdAndTimestamps(
1717       phase, category_group_enabled, name, scope, id, bind_id, thread_id,
1718       timestamp, thread_now, args, flags);
1719 }
1720 
// Core event-recording path. Applies the re-entrancy guard, routes the
// event either to an installed override, the thread-local buffer, or the
// shared chunk under |lock_|, and optionally echoes it to the console.
// Returns a handle to the stored event ({0,0,0} if nothing was stored).
// NO_THREAD_SAFETY_ANALYSIS: locking is conditional (OptionalAutoLock).
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamps(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    const ThreadTicks& thread_timestamp,
    TraceArguments* args,
    unsigned int flags) NO_THREAD_SAFETY_ANALYSIS {
  TraceEventHandle handle = {0, 0, 0};
  if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id,
                                   thread_id, timestamp, args)) {
    return handle;
  }
  DCHECK(!timestamp.is_null());

  // Block recursive trace events for the rest of this function.
  const AutoReset<bool> resetter(&thread_is_in_trace_event, true, false);

  // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
  // avoid collisions.
  bool has_flow =
      flags & (TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_FLOW_IN);
  if (has_flow && (flags & TRACE_EVENT_FLAG_HAS_LOCAL_ID))
    bind_id = MangleEventId(bind_id);

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);

  ThreadLocalEventBuffer* event_buffer = nullptr;
  if (*category_group_enabled & RECORDING_MODE) {
    // |thread_local_event_buffer| can be null if the current thread doesn't
    // have a message loop or the message loop is blocked.
    InitializeThreadLocalEventBufferIfSupported();
    event_buffer = thread_local_event_buffer;
  }

  if (*category_group_enabled & RECORDING_MODE) {
    // An installed override takes the event instead of the local buffers.
    auto trace_event_override =
        add_trace_event_override_.load(std::memory_order_relaxed);
    if (trace_event_override) {
      TraceEvent new_trace_event(
          thread_id, offset_event_timestamp, thread_timestamp, phase,
          category_group_enabled, name, scope, id, bind_id, args, flags);

      trace_event_override(&new_trace_event,
                           /*thread_will_flush=*/event_buffer != nullptr,
                           &handle);
      return handle;
    }
  }

  std::string console_message;

  // If enabled for recording, the event should be added only if one of the
  // filters indicates or category is not enabled for filtering.
  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = nullptr;
    if (event_buffer) {
      // Lock-free path: the thread-local buffer owns the slot.
      trace_event = event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    // NO_THREAD_SAFETY_ANALYSIS: Conditional locking above.
    if (trace_event) {
      trace_event->Reset(thread_id, offset_event_timestamp, thread_timestamp,
                         phase, category_group_enabled, name, scope, id,
                         bind_id, args, flags);
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  // Log outside the lock; LOG can itself emit trace events (see the
  // re-entrancy guard above).
  if (!console_message.empty())
    LOG(ERROR) << console_message;

  return handle;
}
1808 
// Stores a TRACE_EVENT_PHASE_METADATA event in |metadata_events_| under
// |lock_|. Buffered metadata is later moved into the trace by
// AddMetadataEventsWhileLocked.
void TraceLog::AddMetadataEvent(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceArguments* args,
                                unsigned int flags) {
  HEAP_PROFILER_SCOPED_IGNORE;
  auto thread_id = base::PlatformThread::CurrentId();
  // Capture both timestamps before taking the lock.
  ThreadTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();
  AutoLock lock(lock_);
  auto trace_event = std::make_unique<TraceEvent>(
      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
      category_group_enabled, name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,         // id
      trace_event_internal::kNoId,         // bind_id
      args, flags);
  metadata_events_.push_back(std::move(trace_event));
}
1827 
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
// Formats a trace event as a colored, indentation-nested console line for
// ECHO_TO_CONSOLE mode, and maintains the per-thread stack of event start
// times used to compute durations for END events.
std::string TraceLog::EventToConsoleMessage(char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  HEAP_PROFILER_SCOPED_IGNORE;
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  auto thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    // Pop the matching BEGIN time pushed at the end of this function.
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  // Assign each thread name one of 6 ANSI colors, round-robin.
  if (thread_colors_.find(thread_name) == thread_colors_.end()) {
    size_t next_color = (thread_colors_.size() % 6) + 1;
    thread_colors_[thread_name] = next_color;
  }

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%" PRIuS "m", thread_name.c_str(),
                            thread_colors_[thread_name]);

  // Indent by the current nesting depth of open events on this thread.
  size_t depth = 0;
  auto it = thread_event_start_times_.find(thread_id);
  if (it != thread_event_start_times_.end())
    depth = it->second.size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  // Reset the ANSI color.
  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
1878 
UpdateTraceEventDuration(const unsigned char * category_group_enabled,const char * name,TraceEventHandle handle)1879 void TraceLog::UpdateTraceEventDuration(
1880     const unsigned char* category_group_enabled,
1881     const char* name,
1882     TraceEventHandle handle) {
1883   if (!*category_group_enabled)
1884     return;
1885 
1886   UpdateTraceEventDurationExplicit(
1887       category_group_enabled, name, handle, base::PlatformThread::CurrentId(),
1888       /*explicit_timestamps=*/false, OffsetNow(), ThreadNow());
1889 }
1890 
// Closes a COMPLETE event identified by |handle| with explicit end
// timestamps. Routes through an installed duration override if present;
// otherwise looks the event up and updates its duration in place, echoing
// an END line to the console if that option is on.
void TraceLog::UpdateTraceEventDurationExplicit(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  if (!*category_group_enabled)
    return;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event) {
    return;
  }
  const AutoReset<bool> resetter(&thread_is_in_trace_event, true);

#if BUILDFLAG(IS_WIN)
  // Generate an ETW event that marks the end of a complete event.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddCompleteEndEvent(category_group_enabled, name);
#endif  // BUILDFLAG(IS_WIN)

  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    // An installed override handles the update instead of the buffers.
    auto update_duration_override =
        update_duration_override_.load(std::memory_order_relaxed);
    if (update_duration_override) {
      update_duration_override(category_group_enabled, name, handle, thread_id,
                               explicit_timestamps, now, thread_now);
      return;
    }
  }

  std::string console_message;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      // Only COMPLETE events carry a duration to update.
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);

      trace_event->UpdateDuration(now, thread_now);
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  // Log outside the lock; LOG can itself emit trace events (see the
  // re-entrancy guard above).
  if (!console_message.empty())
    LOG(ERROR) << console_message;
}
1946 
// XORs |id| with the per-process hash computed in SetProcessID so that
// local IDs from different processes do not collide in a merged trace.
uint64_t TraceLog::MangleEventId(uint64_t id) {
  return id ^ process_id_hash_;
}
1950 
// Emits one metadata event with a single |arg_name|=|value| argument,
// either through the installed add-event override or directly into the
// thread-shared chunk. Caller must hold |lock_|.
template <typename T>
void TraceLog::AddMetadataEventWhileLocked(PlatformThreadId thread_id,
                                           const char* metadata_name,
                                           const char* arg_name,
                                           const T& value) {
  auto trace_event_override =
      add_trace_event_override_.load(std::memory_order_relaxed);
  if (trace_event_override) {
    TraceEvent trace_event;
    InitializeMetadataEvent(&trace_event, thread_id, metadata_name, arg_name,
                            value);
    trace_event_override(&trace_event, /*thread_will_flush=*/true, nullptr);
  } else {
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false), thread_id,
        metadata_name, arg_name, value);
  }
}
1969 
// Drains the buffered |metadata_events_| into the trace and emits the
// standard process/thread metadata records (cpu count, sort indices,
// process labels, buffer-overflow marker, ...). Caller must hold |lock_|.
void TraceLog::AddMetadataEventsWhileLocked() {
  auto trace_event_override =
      add_trace_event_override_.load(std::memory_order_relaxed);

  // Move metadata added by |AddMetadataEvent| into the trace log.
  if (trace_event_override) {
    while (!metadata_events_.empty()) {
      trace_event_override(metadata_events_.back().get(),
                           /*thread_will_flush=*/true, nullptr);
      metadata_events_.pop_back();
    }
  } else {
    while (!metadata_events_.empty()) {
      TraceEvent* event =
          AddEventToThreadSharedChunkWhileLocked(nullptr, false);
      *event = std::move(*metadata_events_.back());
      metadata_events_.pop_back();
    }
  }

#if !BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  AddMetadataEventWhileLocked(0, "num_cpus", "number",
                              base::SysInfo::NumberOfProcessors());
#endif

  auto current_thread_id = base::PlatformThread::CurrentId();
  // Only emit a sort index when it differs from the default of 0.
  if (process_sort_index_ != 0) {
    AddMetadataEventWhileLocked(current_thread_id, "process_sort_index",
                                "sort_index", process_sort_index_);
  }

#if BUILDFLAG(IS_ANDROID)
  // Record where the main library is mapped plus its build id, so traces
  // can be symbolized offline.
  AddMetadataEventWhileLocked(current_thread_id, "chrome_library_address",
                              "start_address",
                              base::StringPrintf("%p", &__executable_start));
  base::debug::ElfBuildIdBuffer build_id;
  size_t build_id_length =
      base::debug::ReadElfBuildId(&__executable_start, true, build_id);
  if (build_id_length > 0) {
    AddMetadataEventWhileLocked(current_thread_id, "chrome_library_module",
                                "id", std::string(build_id));
  }
#endif

  if (!process_labels_.empty()) {
    std::vector<std::string_view> labels;
    for (const auto& it : process_labels_)
      labels.push_back(it.second);
    AddMetadataEventWhileLocked(current_thread_id, "process_labels", "labels",
                                base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (const auto& it : thread_sort_indices_) {
    if (it.second == 0)
      continue;
    AddMetadataEventWhileLocked(it.first, "thread_sort_index", "sort_index",
                                it.second);
  }

  // If buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    AddMetadataEventWhileLocked(current_thread_id, "trace_buffer_overflowed",
                                "overflowed_at_ts",
                                buffer_limit_reached_timestamp_);
  }
}
2037 
// Looks up the stored event for |handle| without providing a lock; see
// GetEventByHandleInternal for the lookup order.
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, nullptr);
}
2041 
// Resolves |handle| to its TraceEvent, checking the thread-local buffer
// first (lock-free), then the thread-shared chunk and the main buffer
// (acquiring |lock| if one was supplied). Returns nullptr for an empty
// handle or if the chunk sequence no longer matches (event recycled).
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock)
    NO_THREAD_SAFETY_ANALYSIS {
  if (!handle.chunk_seq)
    return nullptr;

  DCHECK(handle.chunk_seq);
  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);

  if (thread_local_event_buffer) {
    TraceEvent* trace_event =
        thread_local_event_buffer->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event has been out-of-control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  // NO_THREAD_SAFETY_ANALYSIS: runtime-dependent locking here.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    // A stale chunk_seq means the chunk was recycled since the handle was
    // issued; the event is gone.
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : nullptr;
  }

  return logged_events_->GetEventByHandle(handle);
}
2074 
SetProcessID(ProcessId process_id)2075 void TraceLog::SetProcessID(ProcessId process_id) {
2076   process_id_ = process_id;
2077   // Create a FNV hash from the process ID for XORing.
2078   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
2079   const uint64_t kOffsetBasis = 14695981039346656037ull;
2080   const uint64_t kFnvPrime = 1099511628211ull;
2081   const uint64_t pid = static_cast<uint64_t>(process_id_);
2082   process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
2083 }
2084 
// Sets the sort index emitted as "process_sort_index" metadata (see
// AddMetadataEventsWhileLocked).
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}
2089 
// Propagates the process name (and pid) to the Perfetto process track
// descriptor when the Perfetto client library is in use; otherwise a
// no-op.
void TraceLog::OnSetProcessName(const std::string& process_name) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (perfetto::Tracing::IsInitialized()) {
    auto track = perfetto::ProcessTrack::Current();
    auto desc = track.Serialize();
    desc.mutable_process()->set_process_name(process_name);
    desc.mutable_process()->set_pid(static_cast<int>(process_id_));
    TrackEvent::SetTrackDescriptor(track, std::move(desc));
  }
#endif
}
2101 
// Returns a fresh id for use with UpdateProcessLabel/RemoveProcessLabel.
int TraceLog::GetNewProcessLabelId() {
  AutoLock lock(lock_);
  return next_process_label_id_++;
}
2106 
UpdateProcessLabel(int label_id,const std::string & current_label)2107 void TraceLog::UpdateProcessLabel(int label_id,
2108                                   const std::string& current_label) {
2109   if (!current_label.length())
2110     return RemoveProcessLabel(label_id);
2111 
2112 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2113   if (perfetto::Tracing::IsInitialized()) {
2114     auto track = perfetto::ProcessTrack::Current();
2115     auto desc = track.Serialize();
2116     desc.mutable_process()->add_process_labels(current_label);
2117     TrackEvent::SetTrackDescriptor(track, std::move(desc));
2118   }
2119 #endif
2120 
2121   AutoLock lock(lock_);
2122   process_labels_[label_id] = current_label;
2123 }
2124 
// Removes the process label stored under |label_id|, if any.
void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  process_labels_.erase(label_id);
}
2129 
// Sets the sort index emitted as "thread_sort_index" metadata for
// |thread_id| (see AddMetadataEventsWhileLocked).
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[thread_id] = sort_index;
}
2134 
2135 #if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Sets the offset applied to event timestamps (see OffsetTimestamp /
// OffsetNow usage in this file).
void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}
2139 #endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2140 
// Test-only: returns the number of registered enabled-state observers.
size_t TraceLog::GetObserverCountForTest() const {
  AutoLock lock(observers_lock_);
  return enabled_state_observers_.size();
}
2145 
// Marks the calling thread as one whose message loop may block (see the
// warning logged in OnFlushTimeout) and flushes its thread-local event
// buffer immediately so its events are not lost.
void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop = true;
  // This will flush the thread local buffer.
  delete thread_local_event_buffer;
}
2151 
CreateTraceBuffer()2152 TraceBuffer* TraceLog::CreateTraceBuffer() {
2153   HEAP_PROFILER_SCOPED_IGNORE;
2154   InternalTraceOptions options = trace_options();
2155   const size_t config_buffer_chunks =
2156       trace_config_.GetTraceBufferSizeInEvents() / kTraceBufferChunkSize;
2157   if (options & kInternalRecordContinuously) {
2158     return TraceBuffer::CreateTraceBufferRingBuffer(
2159         config_buffer_chunks > 0 ? config_buffer_chunks
2160                                  : kTraceEventRingBufferChunks);
2161   }
2162   if (options & kInternalEchoToConsole) {
2163     return TraceBuffer::CreateTraceBufferRingBuffer(
2164         config_buffer_chunks > 0 ? config_buffer_chunks
2165                                  : kEchoToConsoleTraceEventBufferChunks);
2166   }
2167   if (options & kInternalRecordAsMuchAsPossible) {
2168     return TraceBuffer::CreateTraceBufferVectorOfSize(
2169         config_buffer_chunks > 0 ? config_buffer_chunks
2170                                  : kTraceEventVectorBigBufferChunks);
2171   }
2172   return TraceBuffer::CreateTraceBufferVectorOfSize(
2173       config_buffer_chunks > 0 ? config_buffer_chunks
2174                                : kTraceEventVectorBufferChunks);
2175 }
2176 
2177 #if BUILDFLAG(IS_WIN)
// Synchronizes every category's ENABLED_FOR_ETW_EXPORT flag with the
// current ETW keyword state reported by TraceEventETWExport.
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
  // Go through each category and set/clear the ETW bit depending on whether the
  // category is enabled.
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
            category.name())) {
      category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    } else {
      category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    }
  }
}
2190 #endif  // BUILDFLAG(IS_WIN)
2191 
// Test-only: replaces the main trace buffer wholesale.
void TraceLog::SetTraceBufferForTesting(
    std::unique_ptr<TraceBuffer> trace_buffer) {
  AutoLock lock(lock_);
  logged_events_ = std::move(trace_buffer);
}
2197 
2198 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Perfetto data-source setup hook: records the new session's instance
// index, config, and backend type under |track_event_lock_|.
void TraceLog::OnSetup(const perfetto::DataSourceBase::SetupArgs& args) {
  AutoLock lock(track_event_lock_);
  track_event_sessions_.emplace_back(args.internal_instance_index, *args.config,
                                     args.backend_type);
}
2204 
OnStart(const perfetto::DataSourceBase::StartArgs &)2205 void TraceLog::OnStart(const perfetto::DataSourceBase::StartArgs&) {
2206   ++active_track_event_sessions_;
2207   // Legacy observers don't support multiple tracing sessions. So we only
2208   // notify them about the first one.
2209   if (active_track_event_sessions_ > 1) {
2210     return;
2211   }
2212 
2213   AutoLock lock(observers_lock_);
2214   for (EnabledStateObserver* observer : enabled_state_observers_)
2215     observer->OnTraceLogEnabled();
2216   for (const auto& it : async_observers_) {
2217     it.second.task_runner->PostTask(
2218         FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
2219                             it.second.observer));
2220   }
2221 }
2222 
OnStop(const perfetto::DataSourceBase::StopArgs & args)2223 void TraceLog::OnStop(const perfetto::DataSourceBase::StopArgs& args) {
2224   {
2225     // We can't use |lock_| because OnStop() can be called from within
2226     // SetDisabled(). We also can't use |observers_lock_|, because observers
2227     // below can call into IsEnabled(), which needs to access
2228     // |track_event_sessions_|. So we use a separate lock.
2229     AutoLock track_event_lock(track_event_lock_);
2230     std::erase_if(track_event_sessions_, [&args](
2231                                              const TrackEventSession& session) {
2232       return session.internal_instance_index == args.internal_instance_index;
2233     });
2234   }
2235 
2236   --active_track_event_sessions_;
2237   // Legacy observers don't support multiple tracing sessions. So we only
2238   // notify them when the last one stopped.
2239   if (active_track_event_sessions_ > 0) {
2240     return;
2241   }
2242 
2243   AutoLock lock(observers_lock_);
2244   for (base::trace_event::TraceLog::EnabledStateObserver* it :
2245        enabled_state_observers_) {
2246     it->OnTraceLogDisabled();
2247   }
2248   for (const auto& it : async_observers_) {
2249     it.second.task_runner->PostTask(
2250         FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
2251                             it.second.observer));
2252   }
2253 }
2254 #endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2255 
// Default memory-overhead accounting for a convertable argument: reports only
// the object's own footprint (sizeof(*this)) under the
// kConvertableToTraceFormat bucket. Note this does not include any heap data
// a subclass may own; subclasses presumably override to report that —
// TODO(review): confirm against subclass implementations.
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
                sizeof(*this));
}
2261 
2262 }  // namespace trace_event
2263 }  // namespace base
2264 
2265 namespace trace_event_internal {
2266 
AddTraceEvent(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::trace_event::TraceArguments * args,unsigned int flags)2267 base::trace_event::TraceEventHandle AddTraceEvent(
2268     char phase,
2269     const unsigned char* category_group_enabled,
2270     const char* name,
2271     const char* scope,
2272     uint64_t id,
2273     base::trace_event::TraceArguments* args,
2274     unsigned int flags) {
2275   return base::trace_event::TraceLog::GetInstance()->AddTraceEvent(
2276       phase, category_group_enabled, name, scope, id, args, flags);
2277 }
2278 
AddTraceEventWithBindId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,base::trace_event::TraceArguments * args,unsigned int flags)2279 base::trace_event::TraceEventHandle AddTraceEventWithBindId(
2280     char phase,
2281     const unsigned char* category_group_enabled,
2282     const char* name,
2283     const char* scope,
2284     uint64_t id,
2285     uint64_t bind_id,
2286     base::trace_event::TraceArguments* args,
2287     unsigned int flags) {
2288   return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId(
2289       phase, category_group_enabled, name, scope, id, bind_id, args, flags);
2290 }
2291 
AddTraceEventWithProcessId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::ProcessId process_id,base::trace_event::TraceArguments * args,unsigned int flags)2292 base::trace_event::TraceEventHandle AddTraceEventWithProcessId(
2293     char phase,
2294     const unsigned char* category_group_enabled,
2295     const char* name,
2296     const char* scope,
2297     uint64_t id,
2298     base::ProcessId process_id,
2299     base::trace_event::TraceArguments* args,
2300     unsigned int flags) {
2301   return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId(
2302       phase, category_group_enabled, name, scope, id, process_id, args, flags);
2303 }
2304 
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,base::trace_event::TraceArguments * args,unsigned int flags)2305 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
2306     char phase,
2307     const unsigned char* category_group_enabled,
2308     const char* name,
2309     const char* scope,
2310     uint64_t id,
2311     base::PlatformThreadId thread_id,
2312     const base::TimeTicks& timestamp,
2313     base::trace_event::TraceArguments* args,
2314     unsigned int flags) {
2315   return base::trace_event::TraceLog::GetInstance()
2316       ->AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
2317                                               name, scope, id, thread_id,
2318                                               timestamp, args, flags);
2319 }
2320 
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,base::trace_event::TraceArguments * args,unsigned int flags)2321 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
2322     char phase,
2323     const unsigned char* category_group_enabled,
2324     const char* name,
2325     const char* scope,
2326     uint64_t id,
2327     uint64_t bind_id,
2328     base::PlatformThreadId thread_id,
2329     const base::TimeTicks& timestamp,
2330     base::trace_event::TraceArguments* args,
2331     unsigned int flags) {
2332   return base::trace_event::TraceLog::GetInstance()
2333       ->AddTraceEventWithThreadIdAndTimestamp(
2334           phase, category_group_enabled, name, scope, id, bind_id, thread_id,
2335           timestamp, args, flags);
2336 }
2337 
AddTraceEventWithThreadIdAndTimestamps(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,const base::ThreadTicks & thread_timestamp,unsigned int flags)2338 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamps(
2339     char phase,
2340     const unsigned char* category_group_enabled,
2341     const char* name,
2342     const char* scope,
2343     uint64_t id,
2344     base::PlatformThreadId thread_id,
2345     const base::TimeTicks& timestamp,
2346     const base::ThreadTicks& thread_timestamp,
2347     unsigned int flags) {
2348   return base::trace_event::TraceLog::GetInstance()
2349       ->AddTraceEventWithThreadIdAndTimestamps(
2350           phase, category_group_enabled, name, scope, id,
2351           /*bind_id=*/trace_event_internal::kNoId, thread_id, timestamp,
2352           thread_timestamp, nullptr, flags);
2353 }
2354 
AddMetadataEvent(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceArguments * args,unsigned int flags)2355 void AddMetadataEvent(const unsigned char* category_group_enabled,
2356                       const char* name,
2357                       base::trace_event::TraceArguments* args,
2358                       unsigned int flags) {
2359   return base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
2360       category_group_enabled, name, args, flags);
2361 }
2362 
GetNumTracesRecorded()2363 int GetNumTracesRecorded() {
2364   return base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded();
2365 }
2366 
UpdateTraceEventDuration(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceEventHandle handle)2367 void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
2368                               const char* name,
2369                               base::trace_event::TraceEventHandle handle) {
2370   return base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration(
2371       category_group_enabled, name, handle);
2372 }
2373 
UpdateTraceEventDurationExplicit(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceEventHandle handle,base::PlatformThreadId thread_id,bool explicit_timestamps,const base::TimeTicks & now,const base::ThreadTicks & thread_now)2374 void UpdateTraceEventDurationExplicit(
2375     const unsigned char* category_group_enabled,
2376     const char* name,
2377     base::trace_event::TraceEventHandle handle,
2378     base::PlatformThreadId thread_id,
2379     bool explicit_timestamps,
2380     const base::TimeTicks& now,
2381     const base::ThreadTicks& thread_now) {
2382   return base::trace_event::TraceLog::GetInstance()
2383       ->UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
2384                                          thread_id, explicit_timestamps, now,
2385                                          thread_now);
2386 }
2387 
2388 #if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Low-overhead scoped trace: opens a COMPLETE event whose duration is filled
// in by the destructor. The category lookup is cached in a function-local
// atomic, which is only valid because (per the DCHECK) the category group is
// always "gpu" at present.
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  // Macro-expanded lookup that populates |category_group_enabled_| via the
  // cached |atomic|.
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  // Only record the begin event when the category is currently enabled.
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kGlobalScope,  // scope
            trace_event_internal::kNoId,         // id
            base::PlatformThread::CurrentId(),   // thread_id
            TRACE_TIME_TICKS_NOW(), nullptr, TRACE_EVENT_FLAG_NONE);
  }
}
2408 
// Closes the COMPLETE event opened in the constructor by recording its
// duration. Re-checks the cached enabled pointer, so nothing is emitted if
// the category is no longer enabled at scope exit.
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}
2415 #endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2416 
2417 }  // namespace trace_event_internal
2418