1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/sample_vector.h"
6
7 #include <ostream>
8 #include <string_view>
9
10 #include "base/check_op.h"
11 #include "base/compiler_specific.h"
12 #include "base/containers/heap_array.h"
13 #include "base/debug/crash_logging.h"
14 #include "base/debug/leak_annotations.h"
15 #include "base/lazy_instance.h"
16 #include "base/memory/ptr_util.h"
17 #include "base/memory/raw_span.h"
18 #include "base/metrics/persistent_memory_allocator.h"
19 #include "base/notreached.h"
20 #include "base/numerics/safe_conversions.h"
21 #include "base/strings/strcat.h"
22 #include "base/strings/string_number_conversions.h"
23 #include "base/strings/stringprintf.h"
24 #include "base/synchronization/lock.h"
25 #include "base/threading/platform_thread.h"
26
27 // This SampleVector makes use of the single-sample embedded in the base
28 // HistogramSamples class. If the count is non-zero then there is guaranteed
29 // (within the bounds of "eventual consistency") to be no allocated external
30 // storage. Once the full counts storage is allocated, the single-sample must
31 // be extracted and disabled.
32
33 namespace base {
34
35 typedef HistogramBase::Count Count;
36 typedef HistogramBase::Sample Sample;
37
38 namespace {
39
40 // An iterator for sample vectors.
41 template <typename T>
42 class IteratorTemplate : public SampleCountIterator {
43 public:
IteratorTemplate(base::span<T> counts,const BucketRanges * bucket_ranges)44 IteratorTemplate(base::span<T> counts, const BucketRanges* bucket_ranges)
45 : counts_(counts), bucket_ranges_(bucket_ranges) {
46 SkipEmptyBuckets();
47 }
48
49 ~IteratorTemplate() override;
50
51 // SampleCountIterator:
Done() const52 bool Done() const override { return index_ >= counts_.size(); }
Next()53 void Next() override {
54 DCHECK(!Done());
55 index_++;
56 SkipEmptyBuckets();
57 }
58 void Get(HistogramBase::Sample* min,
59 int64_t* max,
60 HistogramBase::Count* count) override;
61
62 // SampleVector uses predefined buckets, so iterator can return bucket index.
GetBucketIndex(size_t * index) const63 bool GetBucketIndex(size_t* index) const override {
64 DCHECK(!Done());
65 if (index != nullptr) {
66 *index = index_;
67 }
68 return true;
69 }
70
71 private:
SkipEmptyBuckets()72 void SkipEmptyBuckets() {
73 if (Done()) {
74 return;
75 }
76
77 while (index_ < counts_.size()) {
78 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) {
79 return;
80 }
81 index_++;
82 }
83 }
84
85 raw_span<T> counts_;
86 raw_ptr<const BucketRanges> bucket_ranges_;
87 size_t index_ = 0;
88 };
89
// Read-only iterator: buckets are observed but left untouched.
using SampleVectorIterator = IteratorTemplate<const HistogramBase::AtomicCount>;

template <>
SampleVectorIterator::~IteratorTemplate() = default;
94
// Get() for an iterator of a SampleVector.
template <>
void SampleVectorIterator::Get(HistogramBase::Sample* min,
                               int64_t* max,
                               HistogramBase::Count* count) {
  DCHECK(!Done());
  *min = bucket_ranges_->range(index_);
  *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
  // Non-extracting read: the bucket count stays in place.
  *count = subtle::NoBarrier_Load(&counts_[index_]);
}
105
// Extracting iterator: each bucket's count is atomically zeroed as it is
// read (see Get() below), so samples are removed from the vector.
using ExtractingSampleVectorIterator =
    IteratorTemplate<HistogramBase::AtomicCount>;

template <>
ExtractingSampleVectorIterator::~IteratorTemplate() {
  // Ensure that the user has consumed all the samples in order to ensure no
  // samples are lost.
  DCHECK(Done());
}
115
// Get() for an extracting iterator of a SampleVector.
template <>
void ExtractingSampleVectorIterator::Get(HistogramBase::Sample* min,
                                         int64_t* max,
                                         HistogramBase::Count* count) {
  DCHECK(!Done());
  *min = bucket_ranges_->range(index_);
  *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
  // Atomically read-and-zero so the sample is consumed exactly once.
  *count = subtle::NoBarrier_AtomicExchange(&counts_[index_], 0);
}
126
127 } // namespace
128
// Constructor for metadata owned by the caller (raw pointer).
SampleVectorBase::SampleVectorBase(uint64_t id,
                                   Metadata* meta,
                                   const BucketRanges* bucket_ranges)
    : HistogramSamples(id, meta),
      bucket_ranges_(bucket_ranges),
      counts_size_(bucket_ranges_->bucket_count()) {
  // A histogram must have at least one bucket.
  CHECK_GE(counts_size_, 1u);
}

// Constructor that takes ownership of heap-allocated metadata.
SampleVectorBase::SampleVectorBase(uint64_t id,
                                   std::unique_ptr<Metadata> meta,
                                   const BucketRanges* bucket_ranges)
    : HistogramSamples(id, std::move(meta)),
      bucket_ranges_(bucket_ranges),
      counts_size_(bucket_ranges_->bucket_count()) {
  // A histogram must have at least one bucket.
  CHECK_GE(counts_size_, 1u);
}

SampleVectorBase::~SampleVectorBase() = default;
148
// Adds |count| samples of |value| to the appropriate bucket. Uses the
// embedded single-sample storage while possible; otherwise mounts (and
// migrates to) the full counts storage.
void SampleVectorBase::Accumulate(Sample value, Count count) {
  const size_t bucket_index = GetBucketIndex(value);

  // Handle the single-sample case.
  if (!counts().has_value()) {
    // Try to accumulate the parameters into the single-count entry.
    if (AccumulateSingleSample(value, count, bucket_index)) {
      // A race condition could lead to a new single-sample being accumulated
      // above just after another thread executed the MountCountsStorage below.
      // Since it is mounted, it could be mounted elsewhere and have values
      // written to it. It's not allowed to have both a single-sample and
      // entries in the counts array so move the single-sample.
      if (counts().has_value()) {
        MoveSingleSampleToCounts();
      }
      return;
    }

    // Need real storage to store both what was in the single-sample plus the
    // parameter information.
    MountCountsStorageAndMoveSingleSample();
  }

  // Handle the multi-sample case.
  Count new_bucket_count =
      subtle::NoBarrier_AtomicIncrement(&counts_at(bucket_index), count);
  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);

  // TODO(bcwhite) Remove after crbug.com/682680.
  // A sign flip of the bucket count while adding a positive |count| indicates
  // the 32-bit counter wrapped; record that for diagnosis.
  Count old_bucket_count = new_bucket_count - count;
  bool record_negative_sample =
      (new_bucket_count >= 0) != (old_bucket_count >= 0) && count > 0;
  if (UNLIKELY(record_negative_sample)) {
    RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
  }
}
185
GetCount(Sample value) const186 Count SampleVectorBase::GetCount(Sample value) const {
187 return GetCountAtIndex(GetBucketIndex(value));
188 }
189
// Returns the total number of samples across all buckets, whichever storage
// mode (single-sample, counts array, or none) is active.
Count SampleVectorBase::TotalCount() const {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0) {
    return sample.count;
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    Count count = 0;
    // TODO(danakj): In C++23 we can skip the `counts_span` lvalue and iterate
    // over `counts().value()` directly without creating a dangling reference.
    span<const HistogramBase::AtomicCount> counts_span = counts().value();
    for (const HistogramBase::AtomicCount& c : counts_span) {
      count += subtle::NoBarrier_Load(&c);
    }
    return count;
  }

  // And the no-value case.
  return 0;
}
212
// Returns the count for the bucket at |bucket_index|: from the single-sample
// if active, else from the counts storage (mounting it if it exists), else 0.
Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
  DCHECK(bucket_index < counts_size());

  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0) {
    return sample.bucket == bucket_index ? sample.count : 0;
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    return subtle::NoBarrier_Load(&counts_at(bucket_index));
  }

  // And the no-value case.
  return 0;
}
230
// Returns a read-only iterator over the non-empty buckets. Samples are left
// in place.
std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0) {
    static_assert(std::is_unsigned<decltype(SingleSample::bucket)>::value);
    if (sample.bucket >= bucket_ranges_->bucket_count()) {
      // Return an empty iterator if the specified bucket is invalid (e.g. due
      // to corruption). If a different sample is eventually emitted, we will
      // move from SingleSample to a counts storage, and that time, we will
      // discard this invalid sample (see MoveSingleSampleToCounts()).
      return std::make_unique<SampleVectorIterator>(
          base::span<const HistogramBase::AtomicCount>(), bucket_ranges_);
    }

    return std::make_unique<SingleSampleIterator>(
        bucket_ranges_->range(sample.bucket),
        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
        /*value_was_extracted=*/false);
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    return std::make_unique<SampleVectorIterator>(*counts(), bucket_ranges_);
  }

  // And the no-value case.
  return std::make_unique<SampleVectorIterator>(
      base::span<const HistogramBase::AtomicCount>(), bucket_ranges_);
}
260
// Returns an iterator that removes samples from the vector as they are read.
// The single-sample (if any) is extracted eagerly here, so the caller must
// fully consume the returned iterator or those samples are lost.
std::unique_ptr<SampleCountIterator> SampleVectorBase::ExtractingIterator() {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Extract();
  if (sample.count != 0) {
    static_assert(std::is_unsigned<decltype(SingleSample::bucket)>::value);
    if (sample.bucket >= bucket_ranges_->bucket_count()) {
      // Return an empty iterator if the specified bucket is invalid (e.g. due
      // to corruption). Note that we've already removed the sample from the
      // underlying data, so this invalid sample is discarded.
      return std::make_unique<ExtractingSampleVectorIterator>(
          base::span<HistogramBase::AtomicCount>(), bucket_ranges_);
    }

    // Note that we have already extracted the samples (i.e., reset the
    // underlying data back to 0 samples), even before the iterator has been
    // used. This means that the caller needs to ensure that this value is
    // eventually consumed, otherwise the sample is lost. There is no iterator
    // that simply points to the underlying SingleSample and extracts its value
    // on-demand because there are tricky edge cases when the SingleSample is
    // disabled between the creation of the iterator and the actual call to
    // Get() (for example, due to histogram changing to use a vector to store
    // its samples).
    return std::make_unique<SingleSampleIterator>(
        bucket_ranges_->range(sample.bucket),
        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
        /*value_was_extracted=*/true);
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    return std::make_unique<ExtractingSampleVectorIterator>(*counts(),
                                                            bucket_ranges_);
  }

  // And the no-value case.
  return std::make_unique<ExtractingSampleVectorIterator>(
      base::span<HistogramBase::AtomicCount>(), bucket_ranges_);
}
299
// Adds (op == ADD) or subtracts (op == SUBTRACT) every sample from |iter|
// into this vector. Returns false if an incoming sample cannot be mapped to
// a bucket with identical [min, max) bounds.
bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
                                       HistogramSamples::Operator op) {
  // Stop now if there's nothing to do.
  if (iter->Done()) {
    return true;
  }

  // Get the first value and its index.
  HistogramBase::Sample min;
  int64_t max;
  HistogramBase::Count count;
  iter->Get(&min, &max, &count);
  size_t dest_index = GetBucketIndex(min);

  // The destination must be a superset of the source meaning that though the
  // incoming ranges will find an exact match, the incoming bucket-index, if
  // it exists, may be offset from the destination bucket-index. Calculate
  // that offset of the passed iterator; there are no overflow checks
  // because 2's complement math will work it out in the end.
  //
  // Because GetBucketIndex() always returns the same true or false result for
  // a given iterator object, |index_offset| is either set here and used below,
  // or never set and never used. The compiler doesn't know this, though, which
  // is why it's necessary to initialize it to something.
  size_t index_offset = 0;
  size_t iter_index;
  if (iter->GetBucketIndex(&iter_index)) {
    index_offset = dest_index - iter_index;
  }
  if (dest_index >= counts_size()) {
    return false;
  }

  // Post-increment. Information about the current sample is not available
  // after this point.
  iter->Next();

  // Single-value storage is possible if there is no counts storage and the
  // retrieved entry is the only one in the iterator.
  if (!counts().has_value()) {
    if (iter->Done()) {
      // Don't call AccumulateSingleSample because that updates sum and count
      // which was already done by the caller of this method.
      if (single_sample().Accumulate(
              dest_index, op == HistogramSamples::ADD ? count : -count)) {
        // Handle race-condition that mounted counts storage between above and
        // here.
        if (counts().has_value()) {
          MoveSingleSampleToCounts();
        }
        return true;
      }
    }

    // The counts storage will be needed to hold the multiple incoming values.
    MountCountsStorageAndMoveSingleSample();
  }

  // Go through the iterator and add the counts into correct bucket.
  while (true) {
    // Ensure that the sample's min/max match the ranges min/max.
    if (min != bucket_ranges_->range(dest_index) ||
        max != bucket_ranges_->range(dest_index + 1)) {
#if !BUILDFLAG(IS_NACL)
      // TODO(crbug/1432981): Remove these. They are used to investigate
      // unexpected failures.
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "min", min);
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "max", max);
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_min",
                              bucket_ranges_->range(dest_index));
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_max",
                              bucket_ranges_->range(dest_index + 1));
#endif  // !BUILDFLAG(IS_NACL)
      NOTREACHED() << "sample=" << min << "," << max
                   << "; range=" << bucket_ranges_->range(dest_index) << ","
                   << bucket_ranges_->range(dest_index + 1);
      return false;
    }

    // Sample's bucket matches exactly. Adjust count.
    subtle::NoBarrier_AtomicIncrement(
        &counts_at(dest_index), op == HistogramSamples::ADD ? count : -count);

    // Advance to the next iterable sample. See comments above for how
    // everything works.
    if (iter->Done()) {
      return true;
    }
    iter->Get(&min, &max, &count);
    if (iter->GetBucketIndex(&iter_index)) {
      // Destination bucket is a known offset from the source bucket.
      dest_index = iter_index + index_offset;
    } else {
      // Destination bucket has to be determined anew each time.
      dest_index = GetBucketIndex(min);
    }
    if (dest_index >= counts_size()) {
      return false;
    }
    iter->Next();
  }
}
402
403 // Uses simple binary search or calculates the index directly if it's an "exact"
404 // linear histogram. This is very general, but there are better approaches if we
405 // knew that the buckets were linearly distributed.
GetBucketIndex(Sample value) const406 size_t SampleVectorBase::GetBucketIndex(Sample value) const {
407 size_t bucket_count = bucket_ranges_->bucket_count();
408 CHECK_GE(value, bucket_ranges_->range(0));
409 CHECK_LT(value, bucket_ranges_->range(bucket_count));
410
411 // For "exact" linear histograms, e.g. bucket_count = maximum + 1, their
412 // minimum is 1 and bucket sizes are 1. Thus, we don't need to binary search
413 // the bucket index. The bucket index for bucket |value| is just the |value|.
414 Sample maximum = bucket_ranges_->range(bucket_count - 1);
415 if (maximum == static_cast<Sample>(bucket_count - 1)) {
416 // |value| is in the underflow bucket.
417 if (value < 1) {
418 return 0;
419 }
420 // |value| is in the overflow bucket.
421 if (value > maximum) {
422 return bucket_count - 1;
423 }
424 return static_cast<size_t>(value);
425 }
426
427 size_t under = 0;
428 size_t over = bucket_count;
429 size_t mid;
430 do {
431 DCHECK_GE(over, under);
432 mid = under + (over - under) / 2;
433 if (mid == under) {
434 break;
435 }
436 if (bucket_ranges_->range(mid) <= value) {
437 under = mid;
438 } else {
439 over = mid;
440 }
441 } while (true);
442
443 DCHECK_LE(bucket_ranges_->range(mid), value);
444 CHECK_GT(bucket_ranges_->range(mid + 1), value);
445 return mid;
446 }
447
MoveSingleSampleToCounts()448 void SampleVectorBase::MoveSingleSampleToCounts() {
449 DCHECK(counts().has_value());
450
451 // Disable the single-sample since there is now counts storage for the data.
452 SingleSample sample = single_sample().ExtractAndDisable();
453
454 // Stop here if there is no "count" as trying to find the bucket index of
455 // an invalid (including zero) "value" will crash.
456 if (sample.count == 0) {
457 return;
458 }
459
460 // Stop here if the sample bucket would be out of range for the AtomicCount
461 // array.
462 if (sample.bucket >= counts_size()) {
463 return;
464 }
465
466 // Move the value into storage. Sum and redundant-count already account
467 // for this entry so no need to call IncreaseSumAndCount().
468 subtle::NoBarrier_AtomicIncrement(&counts_at(sample.bucket), sample.count);
469 }
470
// Ensures the full counts storage exists (creating it exactly once under a
// global lock) and then migrates any pending single-sample into it.
void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
  // There are many SampleVector objects and the lock is needed very
  // infrequently (just when advancing from single-sample to multi-sample) so
  // define a single, global lock that all can use. This lock only prevents
  // concurrent entry into the code below; access and updates to |counts_data_|
  // still requires atomic operations.
  static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
  // Double-checked locking: the outer test avoids the lock once storage
  // exists; the re-test under the lock makes creation happen exactly once.
  if (counts_data_.load(std::memory_order_relaxed) == nullptr) {
    AutoLock lock(counts_lock.Get());
    if (counts_data_.load(std::memory_order_relaxed) == nullptr) {
      // Create the actual counts storage while the above lock is acquired.
      span<HistogramBase::Count> counts = CreateCountsStorageWhileLocked();
      // Point |counts()| to the newly created storage. This is done while
      // locked to prevent possible concurrent calls to CreateCountsStorage
      // but, between that call and here, other threads could notice the
      // existence of the storage and race with this to set_counts(). That's
      // okay because (a) it's atomic and (b) it always writes the same value.
      set_counts(counts);
    }
  }

  // Move any single-sample into the newly mounted storage.
  MoveSingleSampleToCounts();
}
495
// Convenience constructor: delegates with an id of 0.
SampleVector::SampleVector(const BucketRanges* bucket_ranges)
    : SampleVector(0, bucket_ranges) {}

// A SampleVector whose metadata lives on the local heap (LocalMetadata).
SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
    : SampleVectorBase(id, std::make_unique<LocalMetadata>(), bucket_ranges) {}

SampleVector::~SampleVector() = default;
503
IsDefinitelyEmpty() const504 bool SampleVector::IsDefinitelyEmpty() const {
505 // If we are still using SingleSample, and it has a count of 0, then |this|
506 // has no samples. If we are not using SingleSample, always return false, even
507 // though it is possible that |this| has no samples (e.g. we are using a
508 // counts array and all the bucket counts are 0). If we are wrong, this will
509 // just make the caller perform some extra work thinking that |this| is
510 // non-empty.
511 AtomicSingleSample sample = single_sample();
512 return HistogramSamples::IsDefinitelyEmpty() && !sample.IsDisabled() &&
513 sample.Load().count == 0;
514 }
515
// For the heap-backed SampleVector, "existing" storage can only be the
// storage already in use; nothing external can be mounted.
bool SampleVector::MountExistingCountsStorage() const {
  // There is never any existing storage other than what is already in use.
  return counts().has_value();
}
520
GetAsciiHeader(std::string_view histogram_name,int32_t flags) const521 std::string SampleVector::GetAsciiHeader(std::string_view histogram_name,
522 int32_t flags) const {
523 Count sample_count = TotalCount();
524 std::string output;
525 StrAppend(&output, {"Histogram: ", histogram_name, " recorded ",
526 NumberToString(sample_count), " samples"});
527 if (sample_count == 0) {
528 DCHECK_EQ(sum(), 0);
529 } else {
530 double mean = static_cast<float>(sum()) / sample_count;
531 StringAppendF(&output, ", mean = %.1f", mean);
532 }
533 if (flags) {
534 StringAppendF(&output, " (flags = 0x%x)", flags);
535 }
536 return output;
537 }
538
// Renders the histogram as ASCII art: one line per bucket showing its range,
// a bar scaled to at most 72 columns, and cumulative-percentage context.
// Runs of consecutive empty buckets are collapsed into a single "... " line.
std::string SampleVector::GetAsciiBody() const {
  Count sample_count = TotalCount();

  // Prepare to normalize graphical rendering of bucket contents.
  double max_size = 0;
  double scaling_factor = 1;
  max_size = GetPeakBucketSize();
  // Scale histogram bucket counts to take at most 72 characters.
  // Note: Keep in sync w/ kLineLength histogram_samples.cc
  const double kLineLength = 72;
  if (max_size > kLineLength) {
    scaling_factor = kLineLength / max_size;
  }

  // Calculate largest print width needed for any of our bucket range displays.
  size_t print_width = 1;
  for (uint32_t i = 0; i < bucket_count(); ++i) {
    if (GetCountAtIndex(i)) {
      size_t width =
          GetSimpleAsciiBucketRange(bucket_ranges()->range(i)).size() + 1;
      if (width > print_width) {
        print_width = width;
      }
    }
  }

  // |past| + |current| + |remaining| always equals |sample_count|; they feed
  // the percentage bookkeeping in WriteAsciiBucketContext().
  int64_t remaining = sample_count;
  int64_t past = 0;
  std::string output;
  // Output the actual histogram graph.
  for (uint32_t i = 0; i < bucket_count(); ++i) {
    Count current = GetCountAtIndex(i);
    remaining -= current;
    std::string range = GetSimpleAsciiBucketRange(bucket_ranges()->range(i));
    output.append(range);
    // Pad the range label out to the common column width.
    for (size_t j = 0; range.size() + j < print_width + 1; ++j) {
      output.push_back(' ');
    }
    if (0 == current && i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) {
      // Skip ahead past the run of empty buckets (note: mutates |i|).
      while (i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) {
        ++i;
      }
      output.append("... \n");
      continue;  // No reason to plot emptiness.
    }
    Count current_size = round(current * scaling_factor);
    WriteAsciiBucketGraph(current_size, kLineLength, &output);
    WriteAsciiBucketContext(past, current, remaining, i, &output);
    output.append("\n");
    past += current;
  }
  DCHECK_EQ(sample_count, past);
  return output;
}
593
GetPeakBucketSize() const594 double SampleVector::GetPeakBucketSize() const {
595 Count max = 0;
596 for (uint32_t i = 0; i < bucket_count(); ++i) {
597 Count current = GetCountAtIndex(i);
598 if (current > max) {
599 max = current;
600 }
601 }
602 return max;
603 }
604
WriteAsciiBucketContext(int64_t past,Count current,int64_t remaining,uint32_t current_bucket_index,std::string * output) const605 void SampleVector::WriteAsciiBucketContext(int64_t past,
606 Count current,
607 int64_t remaining,
608 uint32_t current_bucket_index,
609 std::string* output) const {
610 double scaled_sum = (past + current + remaining) / 100.0;
611 WriteAsciiBucketValue(current, scaled_sum, output);
612 if (0 < current_bucket_index) {
613 double percentage = past / scaled_sum;
614 StringAppendF(output, " {%3.1f%%}", percentage);
615 }
616 }
617
// Allocates the counts storage on the local heap. The returned span views
// |local_counts_|, which owns the memory for the lifetime of |this|.
span<HistogramBase::AtomicCount>
SampleVector::CreateCountsStorageWhileLocked() {
  local_counts_.resize(counts_size());
  return local_counts_;
}
623
// Binds this vector to counts storage inside a (possibly read-only)
// persistent allocation described by |counts|.
PersistentSampleVector::PersistentSampleVector(
    uint64_t id,
    const BucketRanges* bucket_ranges,
    Metadata* meta,
    const DelayedPersistentAllocation& counts)
    : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
  // Only mount the full storage if the single-sample has been disabled.
  // Otherwise, it is possible for this object instance to start using (empty)
  // storage that was created incidentally while another instance continues to
  // update to the single sample. This "incidental creation" can happen because
  // the memory is a DelayedPersistentAllocation which allows multiple memory
  // blocks within it and applies an all-or-nothing approach to the allocation.
  // Thus, a request elsewhere for one of the _other_ blocks would make _this_
  // block available even though nothing has explicitly requested it.
  //
  // Note that it's not possible for the ctor to mount existing storage and
  // move any single-sample to it because sometimes the persistent memory is
  // read-only. Only non-const methods (which assume that memory is read/write)
  // can do that.
  if (single_sample().IsDisabled()) {
    bool success = MountExistingCountsStorage();
    DCHECK(success);
  }
}
648
PersistentSampleVector::~PersistentSampleVector() = default;

// Unlike SampleVector, emptiness cannot be cheaply determined here, so this
// is deliberately unimplemented.
bool PersistentSampleVector::IsDefinitelyEmpty() const {
  // Not implemented.
  NOTREACHED();

  // Always return false. If we are wrong, this will just make the caller
  // perform some extra work thinking that |this| is non-empty.
  return false;
}
659
// Attempts to mount the counts array from the persistent allocation.
// Returns false if the allocation does not exist yet or its memory is
// unavailable (e.g. corrupt/incomplete data).
bool PersistentSampleVector::MountExistingCountsStorage() const {
  // There is no early exit if counts is not yet mounted because, given that
  // this is a virtual function, it's more efficient to do that at the call-
  // site. There is no danger, however, should this get called anyway (perhaps
  // because of a race condition) because at worst the `counts_data_` and
  // `counts_size_` members would be over-written (in an atomic manner)
  // with the exact same values.

  if (!persistent_counts_.reference()) {
    return false;  // Nothing to mount.
  }

  // Mount the counts array in position. This shouldn't fail but can if the
  // data is corrupt or incomplete.
  span<HistogramBase::AtomicCount> mem =
      persistent_counts_.Get<HistogramBase::AtomicCount>();
  if (mem.empty()) {
    return false;
  }
  // Uses a span that only covers the counts the SampleVector should have
  // access to, which can be a subset of the entire persistent allocation.
  set_counts(mem.first(counts_size()));
  return true;
}
684
// Allocates (or retrieves) the counts array from the persistent allocation,
// falling back to a leaked heap array if persistent memory is unavailable.
span<HistogramBase::AtomicCount>
PersistentSampleVector::CreateCountsStorageWhileLocked() {
  span<HistogramBase::AtomicCount> mem =
      persistent_counts_.Get<HistogramBase::AtomicCount>();
  if (mem.empty()) {
    // The above shouldn't fail but can if Bad Things(tm) are occurring in
    // the persistent allocator. Crashing isn't a good option so instead
    // just allocate something from the heap that we will leak and return that.
    // There will be no sharing or persistence but worse things are already
    // happening.
    auto array = HeapArray<HistogramBase::AtomicCount>::WithSize(counts_size());
    ANNOTATE_LEAKING_OBJECT_PTR(array.data());
    return std::move(array).leak();
  }

  // Returns a span that only covers the counts the SampleVector should have
  // access to, which can be a subset of the entire persistent allocation.
  return mem.first(counts_size());
}
704
705 } // namespace base
706