1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "profile_compilation_info.h"
18
19 #include <fcntl.h>
20 #include <sys/file.h>
21 #include <sys/stat.h>
22 #include <sys/types.h>
23 #include <unistd.h>
24 #include <zlib.h>
25
26 #include <algorithm>
27 #include <cerrno>
28 #include <climits>
29 #include <cstdio>
30 #include <cstdlib>
31 #include <iostream>
32 #include <numeric>
33 #include <random>
34 #include <string>
35 #include <unordered_map>
36 #include <unordered_set>
37 #include <vector>
38
39 #include "android-base/file.h"
40 #include "android-base/properties.h"
41 #include "android-base/scopeguard.h"
42 #include "android-base/strings.h"
43 #include "android-base/unique_fd.h"
44 #include "base/arena_allocator.h"
45 #include "base/bit_utils.h"
46 #include "base/dumpable.h"
47 #include "base/file_utils.h"
48 #include "base/globals.h"
49 #include "base/logging.h" // For VLOG.
50 #include "base/malloc_arena_pool.h"
51 #include "base/os.h"
52 #include "base/safe_map.h"
53 #include "base/scoped_flock.h"
54 #include "base/stl_util.h"
55 #include "base/systrace.h"
56 #include "base/time_utils.h"
57 #include "base/unix_file/fd_file.h"
58 #include "base/utils.h"
59 #include "base/zip_archive.h"
60 #include "dex/code_item_accessors-inl.h"
61 #include "dex/descriptors_names.h"
62 #include "dex/dex_file_loader.h"
63 #include "dex/dex_instruction-inl.h"
64
65 #ifdef ART_TARGET_ANDROID
66 #include "android-modules-utils/sdk_level.h"
67 #endif
68
69 namespace art {
70
71 const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
72 // Last profile version: New extensible profile format.
73 const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '5', '\0' };
74 const uint8_t ProfileCompilationInfo::kProfileVersionForBootImage[] = { '0', '1', '6', '\0' };
75
76 static_assert(sizeof(ProfileCompilationInfo::kProfileVersion) == 4,
77 "Invalid profile version size");
78 static_assert(sizeof(ProfileCompilationInfo::kProfileVersionForBootImage) == 4,
79 "Invalid profile version size");
80
81 // The name of the profile entry in the dex metadata file.
82 // DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
83 const char ProfileCompilationInfo::kDexMetadataProfileEntry[] = "primary.prof";
84
85 // A synthetic annotation that can be used to denote that no annotation should
86 // be associated with the profile samples. We use the empty string for the package name
87 // because that's an invalid package name and should never occur in practice.
88 const ProfileCompilationInfo::ProfileSampleAnnotation
89 ProfileCompilationInfo::ProfileSampleAnnotation::kNone =
90 ProfileCompilationInfo::ProfileSampleAnnotation("");
91
92 static constexpr char kSampleMetadataSeparator = ':';
93
94 // Note: This used to be PATH_MAX (usually 4096) but that seems excessive
95 // and we do not want to rely on that external constant anyway.
96 static constexpr uint16_t kMaxDexFileKeyLength = 1024;
97
98 // Extra descriptors are serialized with a `uint16_t` prefix. This defines the length limit.
99 static constexpr size_t kMaxExtraDescriptorLength = std::numeric_limits<uint16_t>::max();
100
101 // According to dex file specification, there can be more than 2^16 valid method indexes
102 // but bytecode uses only 16 bits, so higher method indexes are not very useful (though
103 // such methods could be reached through virtual or interface dispatch). Consequently,
104 // dex files with more than 2^16 method indexes are not really used and the profile file
105 // format does not support higher method indexes.
106 static constexpr uint32_t kMaxSupportedMethodIndex = 0xffffu;
107
108 // Debug flag to ignore checksums when testing if a method or a class is present in the profile.
109 // Used to facilitate testing profile guided compilation across a large number of apps
110 // using the same test profile.
111 static constexpr bool kDebugIgnoreChecksum = false;
112
113 static constexpr uint8_t kIsMissingTypesEncoding = 6;
114 static constexpr uint8_t kIsMegamorphicEncoding = 7;
115
116 static_assert(sizeof(ProfileCompilationInfo::kIndividualInlineCacheSize) == sizeof(uint8_t),
117 "InlineCache::kIndividualInlineCacheSize does not have the expected type size");
118 static_assert(ProfileCompilationInfo::kIndividualInlineCacheSize < kIsMegamorphicEncoding,
119 "InlineCache::kIndividualInlineCacheSize is larger than expected");
120 static_assert(ProfileCompilationInfo::kIndividualInlineCacheSize < kIsMissingTypesEncoding,
121 "InlineCache::kIndividualInlineCacheSize is larger than expected");
122
123 static constexpr uint32_t kSizeWarningThresholdBytes = 5000000U;
124 static constexpr uint32_t kSizeErrorThresholdBytes = 15000000U;
125
126 static constexpr uint32_t kSizeWarningThresholdBootBytes = 25000000U;
127 static constexpr uint32_t kSizeErrorThresholdBootBytes = 100000000U;
128
129 static bool ChecksumMatch(uint32_t dex_file_checksum, uint32_t checksum) {
130 return kDebugIgnoreChecksum || dex_file_checksum == checksum;
131 }
132
133 namespace {
134
135 // Deflate the input buffer `in_buffer`. It returns a buffer of
136 // compressed data for the input buffer of `*compressed_data_size` size.
137 std::unique_ptr<uint8_t[]> DeflateBuffer(ArrayRef<const uint8_t> in_buffer,
138 /*out*/ uint32_t* compressed_data_size) {
139 z_stream strm;
140 strm.zalloc = Z_NULL;
141 strm.zfree = Z_NULL;
142 strm.opaque = Z_NULL;
143 int init_ret = deflateInit(&strm, 1);
144 if (init_ret != Z_OK) {
145 return nullptr;
146 }
147
148 uint32_t out_size = dchecked_integral_cast<uint32_t>(deflateBound(&strm, in_buffer.size()));
149
150 std::unique_ptr<uint8_t[]> compressed_buffer(new uint8_t[out_size]);
151 strm.avail_in = in_buffer.size();
152 strm.next_in = const_cast<uint8_t*>(in_buffer.data());
153 strm.avail_out = out_size;
154 strm.next_out = &compressed_buffer[0];
155 int ret = deflate(&strm, Z_FINISH);
156 if (ret == Z_STREAM_ERROR) {
157 return nullptr;
158 }
159 *compressed_data_size = out_size - strm.avail_out;
160
161 int end_ret = deflateEnd(&strm);
162 if (end_ret != Z_OK) {
163 return nullptr;
164 }
165
166 return compressed_buffer;
167 }
168
169 // Inflate the data from `in_buffer` into `out_buffer`. The `out_buffer.size()`
170 // is the expected output size of the buffer. It returns Z_STREAM_END on success.
171 // On error, it returns Z_STREAM_ERROR if the compressed data is inconsistent
172 // and Z_DATA_ERROR if the stream ended prematurely or the stream has extra data.
173 int InflateBuffer(ArrayRef<const uint8_t> in_buffer, /*out*/ ArrayRef<uint8_t> out_buffer) {
174 /* allocate inflate state */
175 z_stream strm;
176 strm.zalloc = Z_NULL;
177 strm.zfree = Z_NULL;
178 strm.opaque = Z_NULL;
179 strm.avail_in = in_buffer.size();
180 strm.next_in = const_cast<uint8_t*>(in_buffer.data());
181 strm.avail_out = out_buffer.size();
182 strm.next_out = out_buffer.data();
183
184 int init_ret = inflateInit(&strm);
185 if (init_ret != Z_OK) {
186 return init_ret;
187 }
188
189 int ret = inflate(&strm, Z_NO_FLUSH);
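  // A fully successful inflate consumes all input and fills the entire output buffer;
  // leftover input means extra data, unused output space means the stream ended prematurely.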
190 if (strm.avail_in != 0 || strm.avail_out != 0) {
191 return Z_DATA_ERROR;
192 }
193
194 int end_ret = inflateEnd(&strm);
195 if (end_ret != Z_OK) {
196 return end_ret;
197 }
198
199 return ret;
200 }
201
202 } // anonymous namespace
203
204 enum class ProfileCompilationInfo::ProfileLoadStatus : uint32_t {
205 kSuccess,
206 kIOError,
207 kBadMagic,
208 kVersionMismatch,
209 kBadData,
210 kMergeError, // Merging failed. There are too many extra descriptors
211 // or classes without TypeId referenced by a dex file.
212 };
213
214 enum class ProfileCompilationInfo::FileSectionType : uint32_t {
215 // The values of section enumerators and data format for individual sections
216 // must not be changed without changing the profile file version. New sections
217 // can be added at the end and they shall be ignored by old versions of ART.
218
219 // The list of the dex files included in the profile.
220 // There must be exactly one dex file section and it must be first.
221 kDexFiles = 0,
222
223 // Extra descriptors for referencing classes that do not have a `dex::TypeId`
224 // in the referencing dex file, such as classes from a different dex file
225 // (even outside of the dex files in the profile) or array classes that were
226 // used from other dex files or created through reflection.
227 kExtraDescriptors = 1,
228
229 // Classes included in the profile.
230 kClasses = 2,
231
232 // Methods included in the profile, their hotness flags and inline caches.
233 kMethods = 3,
234
235 // The aggregation counts of the profile, classes and methods. This section is
236 // an optional reserved section that is not yet implemented on the client.
237 kAggregationCounts = 4,
238
239 // The number of known sections.
240 kNumberOfSections = 5
241 };
242
243 class ProfileCompilationInfo::FileSectionInfo {
244 public:
245 // Constructor for reading from a `ProfileSource`. Data shall be filled from the source.
246 FileSectionInfo() {}
247
248 // Constructor for writing to a file.
249 FileSectionInfo(FileSectionType type,
250 uint32_t file_offset,
251 uint32_t file_size,
252 uint32_t inflated_size)
253 : type_(type),
254 file_offset_(file_offset),
255 file_size_(file_size),
256 inflated_size_(inflated_size) {}
257
258 void SetFileOffset(uint32_t file_offset) {
259 DCHECK_EQ(file_offset_, 0u);
260 DCHECK_NE(file_offset, 0u);
261 file_offset_ = file_offset;
262 }
263
264 FileSectionType GetType() const {
265 return type_;
266 }
267
268 uint32_t GetFileOffset() const {
269 return file_offset_;
270 }
271
272 uint32_t GetFileSize() const {
273 return file_size_;
274 }
275
276 uint32_t GetInflatedSize() const {
277 return inflated_size_;
278 }
279
280 uint32_t GetMemSize() const {
281 return inflated_size_ != 0u ? inflated_size_ : file_size_;
282 }
283
284 private:
285 FileSectionType type_;
286 uint32_t file_offset_;
287 uint32_t file_size_;
288 uint32_t inflated_size_; // If 0, do not inflate and use data from file directly.
289 };
290
291 // The file header.
292 class ProfileCompilationInfo::FileHeader {
293 public:
294 // Constructor for reading from a `ProfileSource`. Data shall be filled from the source.
295 FileHeader() {
296 DCHECK(!IsValid());
297 }
298
299 // Constructor for writing to a file.
300 FileHeader(const uint8_t* version, uint32_t file_section_count)
301 : file_section_count_(file_section_count) {
302 static_assert(sizeof(magic_) == sizeof(kProfileMagic));
303 static_assert(sizeof(version_) == sizeof(kProfileVersion));
304 static_assert(sizeof(version_) == sizeof(kProfileVersionForBootImage));
305 memcpy(magic_, kProfileMagic, sizeof(kProfileMagic));
306 memcpy(version_, version, sizeof(version_));
307 DCHECK_LE(file_section_count, kMaxFileSectionCount);
308 DCHECK(IsValid());
309 }
310
311 bool IsValid() const {
312 return memcmp(magic_, kProfileMagic, sizeof(kProfileMagic)) == 0 &&
313 (memcmp(version_, kProfileVersion, kProfileVersionSize) == 0 ||
314 memcmp(version_, kProfileVersionForBootImage, kProfileVersionSize) == 0) &&
315 file_section_count_ != 0u && // The dex files section is mandatory.
316 file_section_count_ <= kMaxFileSectionCount;
317 }
318
319 const uint8_t* GetVersion() const {
320 DCHECK(IsValid());
321 return version_;
322 }
323
324 ProfileLoadStatus InvalidHeaderMessage(/*out*/ std::string* error_msg) const;
325
326 uint32_t GetFileSectionCount() const {
327 DCHECK(IsValid());
328 return file_section_count_;
329 }
330
331 private:
332 // The upper bound for file section count is used to ensure that there
333 // shall be no arithmetic overflow when calculating the size of the header
334 // with section information.
335 static const uint32_t kMaxFileSectionCount;
336
337 uint8_t magic_[4] = {0, 0, 0, 0};
338 uint8_t version_[4] = {0, 0, 0, 0};
339 uint32_t file_section_count_ = 0u;
340 };
341
342 const uint32_t ProfileCompilationInfo::FileHeader::kMaxFileSectionCount =
343 (std::numeric_limits<uint32_t>::max() - sizeof(FileHeader)) / sizeof(FileSectionInfo);
344
345 ProfileCompilationInfo::ProfileLoadStatus
346 ProfileCompilationInfo::FileHeader::InvalidHeaderMessage(/*out*/ std::string* error_msg) const {
347 if (memcmp(magic_, kProfileMagic, sizeof(kProfileMagic)) != 0) {
348 *error_msg = "Profile missing magic.";
349 return ProfileLoadStatus::kBadMagic;
350 }
351 if (memcmp(version_, kProfileVersion, sizeof(kProfileVersion)) != 0 &&
352 memcmp(version_, kProfileVersionForBootImage, sizeof(kProfileVersionForBootImage)) != 0) {
353 *error_msg = "Profile version mismatch.";
354 return ProfileLoadStatus::kVersionMismatch;
355 }
356 if (file_section_count_ == 0u) {
357 *error_msg = "Missing mandatory dex files section.";
358 return ProfileLoadStatus::kBadData;
359 }
360 DCHECK_GT(file_section_count_, kMaxFileSectionCount);
361 *error_msg = "Too many sections.";
362 return ProfileLoadStatus::kBadData;
363 }
364
365 /**
366 * Encapsulate the source of profile data for loading.
367 * The source can be either a plain file or a zip file.
368 * For zip files, the profile entry will be extracted to
369 * the memory map.
370 */
371 class ProfileCompilationInfo::ProfileSource {
372 public:
373 /**
374 * Create a profile source for the given fd. The ownership of the fd
375 * remains with the caller; this class will not attempt to close it at any
376 * point.
377 */
378 static ProfileSource* Create(int32_t fd) {
379 DCHECK_GT(fd, -1);
380 return new ProfileSource(fd, MemMap::Invalid());
381 }
382
383 /**
384 * Create a profile source backed by a memory map. The map can be null in
385 * which case it will be treated as an empty source.
386 */
387 static ProfileSource* Create(MemMap&& mem_map) {
388 return new ProfileSource(/*fd*/ -1, std::move(mem_map));
389 }
390
391 // Seek to the given offset in the source.
392 bool Seek(off_t offset);
393
394 /**
395 * Read bytes from this source.
396 * Reading will advance the current source position so subsequent
397 * invocations will read from the last position.
398 */
399 ProfileLoadStatus Read(void* buffer,
400 size_t byte_count,
401 const std::string& debug_stage,
402 std::string* error);
403
404 /** Return true if the source has 0 data. */
405 bool HasEmptyContent() const;
406
407 private:
408 ProfileSource(int32_t fd, MemMap&& mem_map)
409 : fd_(fd), mem_map_(std::move(mem_map)), mem_map_cur_(0) {}
410
411 bool IsMemMap() const {
412 return fd_ == -1;
413 }
414
415 int32_t fd_; // The fd is not owned by this class.
416 MemMap mem_map_;
417 size_t mem_map_cur_; // Current position in the map to read from.
418 };
419
420 // A helper structure to make sure we don't read past our buffers in the loops.
421 // Also used for writing but the buffer should be pre-sized correctly for that, so we
422 // DCHECK() we do not write beyond the end, rather than returning `false` on failure.
423 class ProfileCompilationInfo::SafeBuffer {
424 public:
425 SafeBuffer()
426 : storage_(nullptr),
427 ptr_current_(nullptr),
428 ptr_end_(nullptr) {}
429
430 explicit SafeBuffer(size_t size)
431 : storage_(new uint8_t[size]),
432 ptr_current_(storage_.get()),
433 ptr_end_(ptr_current_ + size) {}
434
435 // Reads an uint value and advances the current pointer.
436 template <typename T>
437 bool ReadUintAndAdvance(/*out*/ T* value) {
438 static_assert(std::is_unsigned<T>::value, "Type is not unsigned");
439 if (sizeof(T) > GetAvailableBytes()) {
440 return false;
441 }
442 *value = 0;
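    // Assemble the value byte by byte, least significant byte first (little-endian).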
443 for (size_t i = 0; i < sizeof(T); i++) {
444 *value += ptr_current_[i] << (i * kBitsPerByte);
445 }
446 ptr_current_ += sizeof(T);
447 return true;
448 }
449
450 // Reads a length-prefixed string as `std::string_view` and advances the current pointer.
451 // The length is `uint16_t`.
452 bool ReadStringAndAdvance(/*out*/ std::string_view* value) {
453 uint16_t length;
454 if (!ReadUintAndAdvance(&length)) {
455 return false;
456 }
457 if (length > GetAvailableBytes()) {
458 return false;
459 }
460 const void* null_char = memchr(GetCurrentPtr(), 0, length);
461 if (null_char != nullptr) {
462 // Embedded nulls are invalid.
463 return false;
464 }
465 *value = std::string_view(reinterpret_cast<const char*>(GetCurrentPtr()), length);
466 Advance(length);
467 return true;
468 }
469
470 // Compares the given data with the content at the current pointer.
471 // If the contents are equal it advances the current pointer by data_size.
472 bool CompareAndAdvance(const uint8_t* data, size_t data_size) {
473 if (data_size > GetAvailableBytes()) {
474 return false;
475 }
476 if (memcmp(ptr_current_, data, data_size) == 0) {
477 ptr_current_ += data_size;
478 return true;
479 }
480 return false;
481 }
482
483 void WriteAndAdvance(const void* data, size_t data_size) {
484 DCHECK_LE(data_size, GetAvailableBytes());
485 memcpy(ptr_current_, data, data_size);
486 ptr_current_ += data_size;
487 }
488
489 template <typename T>
490 void WriteUintAndAdvance(T value) {
491 static_assert(std::is_integral_v<T>);
492 WriteAndAdvance(&value, sizeof(value));
493 }
494
495 // Deflate a filled buffer. Replaces the internal buffer with a new one, also filled.
496 bool Deflate() {
497 DCHECK_EQ(GetAvailableBytes(), 0u);
498 DCHECK_NE(Size(), 0u);
499 ArrayRef<const uint8_t> in_buffer(Get(), Size());
500 uint32_t output_size = 0;
501 std::unique_ptr<uint8_t[]> compressed_buffer = DeflateBuffer(in_buffer, &output_size);
502 if (compressed_buffer == nullptr) {
503 return false;
504 }
505 storage_ = std::move(compressed_buffer);
506 ptr_current_ = storage_.get() + output_size;
507 ptr_end_ = ptr_current_;
508 return true;
509 }
510
511 // Inflate an unread buffer. Replaces the internal buffer with a new one, also unread.
512 bool Inflate(size_t uncompressed_data_size) {
513 DCHECK(ptr_current_ == storage_.get());
514 DCHECK_NE(Size(), 0u);
515 ArrayRef<const uint8_t> in_buffer(Get(), Size());
516 SafeBuffer uncompressed_buffer(uncompressed_data_size);
517 ArrayRef<uint8_t> out_buffer(uncompressed_buffer.Get(), uncompressed_data_size);
518 int ret = InflateBuffer(in_buffer, out_buffer);
519 if (ret != Z_STREAM_END) {
520 return false;
521 }
522 Swap(uncompressed_buffer);
523 DCHECK(ptr_current_ == storage_.get());
524 return true;
525 }
526
527 // Advances current pointer by data_size.
528 void Advance(size_t data_size) {
529 DCHECK_LE(data_size, GetAvailableBytes());
530 ptr_current_ += data_size;
531 }
532
533 // Returns the count of unread bytes.
534 size_t GetAvailableBytes() const {
535 DCHECK_LE(static_cast<void*>(ptr_current_), static_cast<void*>(ptr_end_));
536 return (ptr_end_ - ptr_current_) * sizeof(*ptr_current_);
537 }
538
539 // Returns the current pointer.
540 uint8_t* GetCurrentPtr() {
541 return ptr_current_;
542 }
543
544 // Get the underlying raw buffer.
545 uint8_t* Get() {
546 return storage_.get();
547 }
548
549 // Get the size of the raw buffer.
550 size_t Size() const {
551 return ptr_end_ - storage_.get();
552 }
553
554 void Swap(SafeBuffer& other) {
555 std::swap(storage_, other.storage_);
556 std::swap(ptr_current_, other.ptr_current_);
557 std::swap(ptr_end_, other.ptr_end_);
558 }
559
560 private:
561 std::unique_ptr<uint8_t[]> storage_;
562 uint8_t* ptr_current_;
563 uint8_t* ptr_end_;
564 };
565
566 ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool, bool for_boot_image)
567 : default_arena_pool_(),
568 allocator_(custom_arena_pool),
569 info_(allocator_.Adapter(kArenaAllocProfile)),
570 profile_key_map_(std::less<const std::string_view>(), allocator_.Adapter(kArenaAllocProfile)),
571 extra_descriptors_(),
572 extra_descriptors_indexes_(ExtraDescriptorHash(&extra_descriptors_),
573 ExtraDescriptorEquals(&extra_descriptors_)) {
574 memcpy(version_,
575 for_boot_image ? kProfileVersionForBootImage : kProfileVersion,
576 kProfileVersionSize);
577 }
578
579 ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
580 : ProfileCompilationInfo(custom_arena_pool, /*for_boot_image=*/ false) { }
581
582 ProfileCompilationInfo::ProfileCompilationInfo()
583 : ProfileCompilationInfo(/*for_boot_image=*/ false) { }
584
585 ProfileCompilationInfo::ProfileCompilationInfo(bool for_boot_image)
586 : ProfileCompilationInfo(&default_arena_pool_, for_boot_image) { }
587
588 ProfileCompilationInfo::~ProfileCompilationInfo() {
589 VLOG(profiler) << Dumpable<MemStats>(allocator_.GetMemStats());
590 }
591
592 void ProfileCompilationInfo::DexPcData::AddClass(const dex::TypeIndex& type_idx) {
593 if (is_megamorphic || is_missing_types) {
594 return;
595 }
596
597 // Perform an explicit lookup for the type instead of directly emplacing the
598 // element. We do this because emplace() allocates the node before doing the
599 // lookup and if it then finds an identical element, it shall deallocate the
600 // node. For Arena allocations, that's essentially a leak.
601 auto lb = classes.lower_bound(type_idx);
602 if (lb != classes.end() && *lb == type_idx) {
603 // The type index exists.
604 return;
605 }
606
607 // Check if adding the type will cause the cache to become megamorphic.
608 if (classes.size() + 1 >= ProfileCompilationInfo::kIndividualInlineCacheSize) {
609 is_megamorphic = true;
610 classes.clear();
611 return;
612 }
613
614 // The type does not exist and the inline cache will not be megamorphic.
615 classes.emplace_hint(lb, type_idx);
616 }
617
618 // Transform the actual dex location into a key used to index the dex file in the profile.
619 // See ProfileCompilationInfo#GetProfileDexFileBaseKey as well.
620 std::string ProfileCompilationInfo::GetProfileDexFileAugmentedKey(
621 const std::string& dex_location,
622 const ProfileSampleAnnotation& annotation) {
623 std::string base_key = GetProfileDexFileBaseKey(dex_location);
624 return annotation == ProfileSampleAnnotation::kNone
625 ? base_key
626 : base_key + kSampleMetadataSeparator + annotation.GetOriginPackageName();
627 }
628
629 // Transform the actual dex location into a base profile key (represented as relative paths).
630 // Note: this is OK because we don't store profiles of different apps into the same file.
631 // Apps with split apks don't cause trouble because each split has a different name and will not
632 // collide with other entries.
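// For example, a dex location like "/data/app/<pkg>/base.apk" (illustrative path)
// maps to the base key "base.apk".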
633 std::string_view ProfileCompilationInfo::GetProfileDexFileBaseKeyView(
634 std::string_view dex_location) {
635 DCHECK(!dex_location.empty());
636 size_t last_sep_index = dex_location.find_last_of('/');
637 if (last_sep_index == std::string::npos) {
638 return dex_location;
639 } else {
640 DCHECK(last_sep_index < dex_location.size());
641 return dex_location.substr(last_sep_index + 1);
642 }
643 }
644
645 std::string ProfileCompilationInfo::GetProfileDexFileBaseKey(const std::string& dex_location) {
646 // Note: Conversions between std::string and std::string_view.
647 return std::string(GetProfileDexFileBaseKeyView(dex_location));
648 }
649
650 std::string_view ProfileCompilationInfo::GetBaseKeyViewFromAugmentedKey(
651 std::string_view profile_key) {
652 size_t pos = profile_key.rfind(kSampleMetadataSeparator);
653 return (pos == std::string::npos) ? profile_key : profile_key.substr(0, pos);
654 }
655
656 std::string ProfileCompilationInfo::GetBaseKeyFromAugmentedKey(
657 const std::string& profile_key) {
658 // Note: Conversions between std::string and std::string_view.
659 return std::string(GetBaseKeyViewFromAugmentedKey(profile_key));
660 }
661
662 std::string ProfileCompilationInfo::MigrateAnnotationInfo(
663 const std::string& base_key,
664 const std::string& augmented_key) {
665 size_t pos = augmented_key.rfind(kSampleMetadataSeparator);
666 return (pos == std::string::npos)
667 ? base_key
668 : base_key + augmented_key.substr(pos);
669 }
670
671 ProfileCompilationInfo::ProfileSampleAnnotation ProfileCompilationInfo::GetAnnotationFromKey(
672 const std::string& augmented_key) {
673 size_t pos = augmented_key.rfind(kSampleMetadataSeparator);
674 return (pos == std::string::npos)
675 ? ProfileSampleAnnotation::kNone
676 : ProfileSampleAnnotation(augmented_key.substr(pos + 1));
677 }
678
679 bool ProfileCompilationInfo::AddMethods(const std::vector<ProfileMethodInfo>& methods,
680 MethodHotness::Flag flags,
681 const ProfileSampleAnnotation& annotation,
682 bool is_test) {
683 for (const ProfileMethodInfo& method : methods) {
684 if (!AddMethod(method, flags, annotation, is_test)) {
685 return false;
686 }
687 }
688 return true;
689 }
690
691 dex::TypeIndex ProfileCompilationInfo::FindOrCreateTypeIndex(const DexFile& dex_file,
692 TypeReference class_ref) {
693 DCHECK(class_ref.dex_file != nullptr);
694 DCHECK_LT(class_ref.TypeIndex().index_, class_ref.dex_file->NumTypeIds());
695 if (class_ref.dex_file == &dex_file) {
696 // We can use the type index from the `class_ref` as it's a valid index in the `dex_file`.
697 return class_ref.TypeIndex();
698 }
699 // Try to find a `TypeId` in the method's dex file.
700 std::string_view descriptor = class_ref.dex_file->GetTypeDescriptorView(class_ref.TypeIndex());
701 return FindOrCreateTypeIndex(dex_file, descriptor);
702 }
703
704 dex::TypeIndex ProfileCompilationInfo::FindOrCreateTypeIndex(const DexFile& dex_file,
705 std::string_view descriptor) {
706 const dex::TypeId* type_id = dex_file.FindTypeId(descriptor);
707 if (type_id != nullptr) {
708 return dex_file.GetIndexForTypeId(*type_id);
709 }
710 // Try to find an existing extra descriptor.
711 uint32_t num_type_ids = dex_file.NumTypeIds();
712 uint32_t max_artificial_ids = DexFile::kDexNoIndex16 - num_type_ids;
713 // Check descriptor length for "extra descriptor". We are using `uint16_t` as prefix.
714 if (UNLIKELY(descriptor.size() > kMaxExtraDescriptorLength)) {
715 return dex::TypeIndex(); // Invalid.
716 }
717 auto it = extra_descriptors_indexes_.find(descriptor);
718 if (it != extra_descriptors_indexes_.end()) {
719 return (*it < max_artificial_ids) ? dex::TypeIndex(num_type_ids + *it) : dex::TypeIndex();
720 }
721 // Check if inserting the extra descriptor yields a valid artificial type index.
722 if (UNLIKELY(extra_descriptors_.size() >= max_artificial_ids)) {
723 return dex::TypeIndex(); // Invalid.
724 }
725 // Add the descriptor to extra descriptors and return the artificial type index.
726 ExtraDescriptorIndex new_extra_descriptor_index = AddExtraDescriptor(descriptor);
727 DCHECK_LT(new_extra_descriptor_index, max_artificial_ids);
728 return dex::TypeIndex(num_type_ids + new_extra_descriptor_index);
729 }
730
731 bool ProfileCompilationInfo::AddClass(const DexFile& dex_file,
732 std::string_view descriptor,
733 const ProfileSampleAnnotation& annotation) {
734 DexFileData* const data = GetOrAddDexFileData(&dex_file, annotation);
735 if (data == nullptr) { // checksum mismatch
736 return false;
737 }
738 dex::TypeIndex type_index = FindOrCreateTypeIndex(dex_file, descriptor);
739 if (!type_index.IsValid()) {
740 return false;
741 }
742 data->class_set.insert(type_index);
743 return true;
744 }
745
746 bool ProfileCompilationInfo::MergeWith(const std::string& filename) {
747 std::string error;
748 #ifdef _WIN32
749 int flags = O_RDONLY;
750 #else
751 int flags = O_RDONLY | O_NOFOLLOW | O_CLOEXEC;
752 #endif
753 ScopedFlock profile_file =
754 LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
755
756 if (profile_file.get() == nullptr) {
757 LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
758 return false;
759 }
760
761 int fd = profile_file->Fd();
762
763 ProfileLoadStatus status = LoadInternal(fd, &error);
764 if (status == ProfileLoadStatus::kSuccess) {
765 return true;
766 }
767
768 LOG(WARNING) << "Could not load profile data from file " << filename << ": " << error;
769 return false;
770 }
771
772 bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_invalid) {
773 ScopedTrace trace(__PRETTY_FUNCTION__);
774 std::string error;
775
776 if (!IsEmpty()) {
777 return false;
778 }
779
780 #ifdef _WIN32
781 int flags = O_RDWR;
782 #else
783 int flags = O_RDWR | O_NOFOLLOW | O_CLOEXEC;
784 #endif
785 // There's no need to fsync profile data right away. We get many chances
786 // to write it again in case something goes wrong. We can rely on a simple
787 // close(), no sync, and let the kernel decide when to write to disk.
788 ScopedFlock profile_file =
789 LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
790
791 if (profile_file.get() == nullptr) {
792 if (clear_if_invalid && errno == ENOENT) {
793 return true;
794 }
795 LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
796 return false;
797 }
798
799 int fd = profile_file->Fd();
800
801 ProfileLoadStatus status = LoadInternal(fd, &error);
802 if (status == ProfileLoadStatus::kSuccess) {
803 return true;
804 }
805
806 if (clear_if_invalid &&
807 ((status == ProfileLoadStatus::kBadMagic) ||
808 (status == ProfileLoadStatus::kVersionMismatch) ||
809 (status == ProfileLoadStatus::kBadData))) {
810 LOG(WARNING) << "Clearing bad or obsolete profile data from file "
811 << filename << ": " << error;
812 // When ART Service is enabled, this is the only place where we mutate a profile in place.
813 // TODO(jiakaiz): Get rid of this.
814 if (profile_file->ClearContent()) {
815 return true;
816 } else {
817 PLOG(WARNING) << "Could not clear profile file: " << filename;
818 return false;
819 }
820 }
821
822 LOG(WARNING) << "Could not load profile data from file " << filename << ": " << error;
823 return false;
824 }
825
826 bool ProfileCompilationInfo::Save(const std::string& filename,
827 uint64_t* bytes_written,
828 bool flush) {
829 ScopedTrace trace(__PRETTY_FUNCTION__);
830
831 #ifndef ART_TARGET_ANDROID
832 return SaveFallback(filename, bytes_written, flush);
833 #else
834 // Prior to U, SELinux policy doesn't allow apps to create profile files.
835 // Additionally, when installd is being used for dexopt, it acquires a flock when working on a
836 // profile. It's unclear to us whether the flock means that the file at the fd shouldn't change or
837 // that the file at the path shouldn't change, especially when the installd code is modified by
838 // partners. Therefore, we fall back to using a flock as well just to be safe.
839 if (!android::modules::sdklevel::IsAtLeastU() ||
840 !android::base::GetBoolProperty("dalvik.vm.useartservice", /*default_value=*/false)) {
841 return SaveFallback(filename, bytes_written, flush);
842 }
843
844 std::string tmp_filename = filename + ".XXXXXX.tmp";
845 // mkostemps creates the file with permissions 0600, which is what we want, so there's
846 // no need to chmod.
847 android::base::unique_fd fd(mkostemps(tmp_filename.data(), /*suffixlen=*/4, O_CLOEXEC));
848 if (fd.get() < 0) {
849 PLOG(WARNING) << "Failed to create temp profile file for " << filename;
850 return false;
851 }
852
853 // In case anything goes wrong.
854 auto remove_tmp_file = android::base::make_scope_guard([&]() {
855 if (unlink(tmp_filename.c_str()) != 0) {
856 PLOG(WARNING) << "Failed to remove temp profile file " << tmp_filename;
857 }
858 });
859
860 bool result = Save(fd.get(), flush);
861 if (!result) {
862 VLOG(profiler) << "Failed to save profile info to temp profile file " << tmp_filename;
863 return false;
864 }
865
866 fd.reset();
867
868 // Move the temp profile file to the final location.
869 if (rename(tmp_filename.c_str(), filename.c_str()) != 0) {
870 PLOG(WARNING) << "Failed to commit profile file " << filename;
871 return false;
872 }
873
874 remove_tmp_file.Disable();
875
876 if (flush) {
877 std::string dirname = android::base::Dirname(filename);
878 std::unique_ptr<File> dir(OS::OpenFileForReading(dirname.c_str()));
879 if (dir == nullptr || dir->Flush(/*flush_metadata=*/true) != 0) {
880 PLOG(WARNING) << "Failed to flush directory " << dirname;
881 }
882 }
883
884 int64_t size = OS::GetFileSizeBytes(filename.c_str());
885 if (size != -1) {
886 VLOG(profiler) << "Successfully saved profile info to " << filename << " Size: " << size;
887 if (bytes_written != nullptr) {
888 *bytes_written = static_cast<uint64_t>(size);
889 }
890 } else {
891 VLOG(profiler) << "Saved profile info to " << filename
892 << " but failed to get size: " << strerror(errno);
893 }
894
895 return true;
896 #endif
897 }
898
899 bool ProfileCompilationInfo::SaveFallback(const std::string& filename,
900 uint64_t* bytes_written,
901 bool flush) {
902 std::string error;
903 #ifdef _WIN32
904 int flags = O_WRONLY | O_CREAT;
905 #else
906 int flags = O_WRONLY | O_NOFOLLOW | O_CLOEXEC | O_CREAT;
907 #endif
908 // There's no need to fsync profile data right away. We get many chances
909 // to write it again in case something goes wrong. We can rely on a simple
910 // close(), no sync, and let the kernel decide when to write to disk.
911 ScopedFlock profile_file =
912 LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
913 if (profile_file.get() == nullptr) {
914 LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
915 return false;
916 }
917
918 int fd = profile_file->Fd();
919
920 // We need to clear the data because we don't support appending to the profiles yet.
921 if (!profile_file->ClearContent()) {
922 PLOG(WARNING) << "Could not clear profile file: " << filename;
923 return false;
924 }
925
926 // This doesn't need locking because we are trying to lock the file for exclusive
927 // access and fail immediately if we can't.
928 bool result = Save(fd, flush);
929
930 if (flush) {
931 std::string dirname = android::base::Dirname(filename);
932 std::unique_ptr<File> dir(OS::OpenFileForReading(dirname.c_str()));
933 if (dir == nullptr || dir->Flush(/*flush_metadata=*/true) != 0) {
934 PLOG(WARNING) << "Failed to flush directory " << dirname;
935 }
936 }
937
938 if (result) {
939 int64_t size = OS::GetFileSizeBytes(filename.c_str());
940 if (size != -1) {
941 VLOG(profiler)
942 << "Successfully saved profile info to " << filename << " Size: "
943 << size;
944 if (bytes_written != nullptr) {
945 *bytes_written = static_cast<uint64_t>(size);
946 }
947 } else {
948 VLOG(profiler) << "Saved profile info to " << filename
949 << " but failed to get size: " << strerror(errno);
950 }
951 } else {
952 VLOG(profiler) << "Failed to save profile info to " << filename;
953 }
954 return result;
955 }
956
957 // Returns true if all the bytes were successfully written to the file descriptor.
958 static bool WriteBuffer(int fd, const void* buffer, size_t byte_count) {
959 while (byte_count > 0) {
960 int bytes_written = TEMP_FAILURE_RETRY(write(fd, buffer, byte_count));
961 if (bytes_written == -1) {
962 return false;
963 }
964 byte_count -= bytes_written; // Reduce the number of remaining bytes.
965 reinterpret_cast<const uint8_t*&>(buffer) += bytes_written; // Move the buffer forward.
966 }
967 return true;
968 }
969
970 /**
971 * Serialization format:
972 *
973 * The file starts with a header and section information:
974 * FileHeader
975 * FileSectionInfo[]
976 * The first FileSectionInfo must be for the DexFiles section.
977 *
978 * The rest of the file is allowed to contain different sections in any order,
979 * at arbitrary offsets, with any gaps between them and each section can be
980 * either plaintext or separately zipped. However, we're writing sections
981 * without any gaps with the following order and compression:
982 * DexFiles - mandatory, plaintext
983 * ExtraDescriptors - optional, zipped
984 * Classes - optional, zipped
985 * Methods - optional, zipped
986 * AggregationCounts - optional, zipped, server-side
987 *
988 * DexFiles:
989 * number_of_dex_files
990 * (checksum,num_type_ids,num_method_ids,profile_key)[number_of_dex_files]
991 * where `profile_key` is a length-prefixed string, the length is `uint16_t`.
992 *
993 * ExtraDescriptors:
994 * number_of_extra_descriptors
995 * (extra_descriptor)[number_of_extra_descriptors]
996 * where `extra_descriptor` is a length-prefixed string, the length is `uint16_t`.
997 *
998 * Classes contains records for any number of dex files, each consisting of:
999 * profile_index // Index of the dex file in DexFiles section.
1000 * number_of_classes
1001 * type_index_diff[number_of_classes]
1002 * where instead of storing plain sorted type indexes, we store their differences
1003 * as smaller numbers are likely to compress better.
1004 *
1005 * Methods contains records for any number of dex files, each consisting of:
1006 * profile_index // Index of the dex file in DexFiles section.
1007 * following_data_size // For easy skipping of remaining data when dex file is filtered out.
1008 * method_flags
1009 * bitmap_data
1010 * method_encoding[] // Until the size indicated by `following_data_size`.
1011 * where `method_flags` is a union of flags recorded for methods in the referenced dex file,
1012 * `bitmap_data` contains `num_method_ids` bits for each bit set in `method_flags` other
1013 * than "hot" (the size of `bitmap_data` is rounded up to whole bytes) and `method_encoding[]`
1014 * contains data for hot methods. The `method_encoding` is:
1015 * method_index_diff
1016 * number_of_inline_caches
1017 * inline_cache_encoding[number_of_inline_caches]
1018 * where differences in method indexes are used for better compression,
1019 * and the `inline_cache_encoding` is
1020 * dex_pc
1021 * (M|dex_map_size)
1022 * type_index_diff[dex_map_size]
1023 * where `M` stands for special encodings indicating missing types (kIsMissingTypesEncoding)
1024 * or megamorphic call (kIsMegamorphicEncoding) which both imply `dex_map_size == 0`.
1025 **/
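// For illustration of the diff encoding described above: sorted type indexes
// {3, 10, 12} would be stored as the differences {3, 7, 2}, assuming each entry
// is the difference from the previous index and the first difference is taken
// from zero. Hot method indexes use the same `method_index_diff` scheme.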
1026 bool ProfileCompilationInfo::Save(int fd, bool flush) {
1027 uint64_t start = NanoTime();
1028 ScopedTrace trace(__PRETTY_FUNCTION__);
1029 DCHECK_GE(fd, 0);
1030
1031 // Collect uncompressed section sizes.
1032 // Use `uint64_t` and assume this cannot overflow as we would have run out of memory.
1033 uint64_t extra_descriptors_section_size = 0u;
1034 if (!extra_descriptors_.empty()) {
1035 extra_descriptors_section_size += sizeof(uint16_t); // Number of descriptors.
1036 for (const std::string& descriptor : extra_descriptors_) {
1037 // Length-prefixed string, the length is `uint16_t`.
1038 extra_descriptors_section_size += sizeof(uint16_t) + descriptor.size();
1039 }
1040 }
1041 uint64_t dex_files_section_size = sizeof(ProfileIndexType); // Number of dex files.
1042 uint64_t classes_section_size = 0u;
1043 uint64_t methods_section_size = 0u;
1044 DCHECK_LE(info_.size(), MaxProfileIndex());
1045 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1046 if (dex_data->profile_key.size() > kMaxDexFileKeyLength) {
1047 LOG(WARNING) << "DexFileKey exceeds allocated limit";
1048 return false;
1049 }
1050 dex_files_section_size +=
1051 3 * sizeof(uint32_t) + // Checksum, num_type_ids, num_method_ids.
1052 // Length-prefixed string, the length is `uint16_t`.
1053 sizeof(uint16_t) + dex_data->profile_key.size();
1054 classes_section_size += dex_data->ClassesDataSize();
1055 methods_section_size += dex_data->MethodsDataSize();
1056 }
1057
1058 const uint32_t file_section_count =
1059 /* dex files */ 1u +
1060 /* extra descriptors */ (extra_descriptors_section_size != 0u ? 1u : 0u) +
1061 /* classes */ (classes_section_size != 0u ? 1u : 0u) +
1062 /* methods */ (methods_section_size != 0u ? 1u : 0u);
1063 uint64_t header_and_infos_size =
1064 sizeof(FileHeader) + file_section_count * sizeof(FileSectionInfo);
1065
1066 // Check size limit. Allow large profiles for non-target builds for the case
1067 // where we are merging many profiles to generate a boot image profile.
1068 uint64_t total_uncompressed_size =
1069 header_and_infos_size +
1070 dex_files_section_size +
1071 extra_descriptors_section_size +
1072 classes_section_size +
1073 methods_section_size;
1074 VLOG(profiler) << "Required capacity: " << total_uncompressed_size << " bytes.";
1075 if (total_uncompressed_size > GetSizeErrorThresholdBytes()) {
1076 LOG(WARNING) << "Profile data size exceeds "
1077 << GetSizeErrorThresholdBytes()
1078 << " bytes. Profile will not be written to disk."
1079 << " It requires " << total_uncompressed_size << " bytes.";
1080 return false;
1081 }
1082
1083 // Start with an invalid file header and section infos.
1084 DCHECK_EQ(lseek(fd, 0, SEEK_CUR), 0);
1085 constexpr uint32_t kMaxNumberOfSections = enum_cast<uint32_t>(FileSectionType::kNumberOfSections);
1086 constexpr uint64_t kMaxHeaderAndInfosSize =
1087 sizeof(FileHeader) + kMaxNumberOfSections * sizeof(FileSectionInfo);
1088 DCHECK_LE(header_and_infos_size, kMaxHeaderAndInfosSize);
1089 std::array<uint8_t, kMaxHeaderAndInfosSize> placeholder;
1090 memset(placeholder.data(), 0, header_and_infos_size);
1091 if (!WriteBuffer(fd, placeholder.data(), header_and_infos_size)) {
1092 return false;
1093 }
1094
1095 std::array<FileSectionInfo, kMaxNumberOfSections> section_infos;
1096 size_t section_index = 0u;
1097 uint32_t file_offset = header_and_infos_size;
1098 auto add_section_info = [&](FileSectionType type, uint32_t file_size, uint32_t inflated_size) {
1099 DCHECK_LT(section_index, section_infos.size());
1100 section_infos[section_index] = FileSectionInfo(type, file_offset, file_size, inflated_size);
1101 file_offset += file_size;
1102 section_index += 1u;
1103 };
1104
1105 // Write the dex files section.
1106 {
1107 SafeBuffer buffer(dex_files_section_size);
1108 buffer.WriteUintAndAdvance(dchecked_integral_cast<ProfileIndexType>(info_.size()));
1109 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1110 buffer.WriteUintAndAdvance(dex_data->checksum);
1111 buffer.WriteUintAndAdvance(dex_data->num_type_ids);
1112 buffer.WriteUintAndAdvance(dex_data->num_method_ids);
1113 buffer.WriteUintAndAdvance(dchecked_integral_cast<uint16_t>(dex_data->profile_key.size()));
1114 buffer.WriteAndAdvance(dex_data->profile_key.c_str(), dex_data->profile_key.size());
1115 }
1116 DCHECK_EQ(buffer.GetAvailableBytes(), 0u);
1117 // Write the dex files section uncompressed.
1118 if (!WriteBuffer(fd, buffer.Get(), dex_files_section_size)) {
1119 return false;
1120 }
1121 add_section_info(FileSectionType::kDexFiles, dex_files_section_size, /*inflated_size=*/ 0u);
1122 }
1123
1124 // Write the extra descriptors section.
1125 if (extra_descriptors_section_size != 0u) {
1126 SafeBuffer buffer(extra_descriptors_section_size);
1127 buffer.WriteUintAndAdvance(dchecked_integral_cast<uint16_t>(extra_descriptors_.size()));
1128 for (const std::string& descriptor : extra_descriptors_) {
1129 buffer.WriteUintAndAdvance(dchecked_integral_cast<uint16_t>(descriptor.size()));
1130 buffer.WriteAndAdvance(descriptor.c_str(), descriptor.size());
1131 }
1132 if (!buffer.Deflate()) {
1133 return false;
1134 }
1135 if (!WriteBuffer(fd, buffer.Get(), buffer.Size())) {
1136 return false;
1137 }
1138 add_section_info(
1139 FileSectionType::kExtraDescriptors, buffer.Size(), extra_descriptors_section_size);
1140 }
1141
1142 // Write the classes section.
1143 if (classes_section_size != 0u) {
1144 SafeBuffer buffer(classes_section_size);
1145 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1146 dex_data->WriteClasses(buffer);
1147 }
1148 if (!buffer.Deflate()) {
1149 return false;
1150 }
1151 if (!WriteBuffer(fd, buffer.Get(), buffer.Size())) {
1152 return false;
1153 }
1154 add_section_info(FileSectionType::kClasses, buffer.Size(), classes_section_size);
1155 }
1156
1157 // Write the methods section.
1158 if (methods_section_size != 0u) {
1159 SafeBuffer buffer(methods_section_size);
1160 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1161 dex_data->WriteMethods(buffer);
1162 }
1163 if (!buffer.Deflate()) {
1164 return false;
1165 }
1166 if (!WriteBuffer(fd, buffer.Get(), buffer.Size())) {
1167 return false;
1168 }
1169 add_section_info(FileSectionType::kMethods, buffer.Size(), methods_section_size);
1170 }
1171
1172 if (file_offset > GetSizeWarningThresholdBytes()) {
1173 LOG(WARNING) << "Profile data size exceeds "
1174 << GetSizeWarningThresholdBytes()
1175 << " It has " << file_offset << " bytes";
1176 }
1177
1178 // Write section infos.
1179 if (lseek64(fd, sizeof(FileHeader), SEEK_SET) != sizeof(FileHeader)) {
1180 return false;
1181 }
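  // Each FileSectionInfo is serialized as four uint32_t values:
  // section type, file offset, file size and inflated size.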
1182 SafeBuffer section_infos_buffer(section_index * 4u * sizeof(uint32_t));
1183 for (size_t i = 0; i != section_index; ++i) {
1184 const FileSectionInfo& info = section_infos[i];
1185 section_infos_buffer.WriteUintAndAdvance(enum_cast<uint32_t>(info.GetType()));
1186 section_infos_buffer.WriteUintAndAdvance(info.GetFileOffset());
1187 section_infos_buffer.WriteUintAndAdvance(info.GetFileSize());
1188 section_infos_buffer.WriteUintAndAdvance(info.GetInflatedSize());
1189 }
1190 DCHECK_EQ(section_infos_buffer.GetAvailableBytes(), 0u);
1191 if (!WriteBuffer(fd, section_infos_buffer.Get(), section_infos_buffer.Size())) {
1192 return false;
1193 }
1194
1195 // Write header.
1196 FileHeader header(version_, section_index);
1197 if (lseek(fd, 0, SEEK_SET) != 0) {
1198 return false;
1199 }
1200 if (!WriteBuffer(fd, &header, sizeof(FileHeader))) {
1201 return false;
1202 }
1203
1204 if (flush) {
1205 // We do not flush for non-Linux because `flush` is only used by the runtime and the runtime
1206 // only supports Linux.
1207 #ifdef __linux__
1208 if (fsync(fd) != 0) {
1209 PLOG(WARNING) << "Failed to flush profile data";
1210 }
1211 #endif
1212 }
1213
1214 uint64_t total_time = NanoTime() - start;
1215 VLOG(profiler) << "Compressed from "
1216 << std::to_string(total_uncompressed_size)
1217 << " to "
1218 << std::to_string(file_offset);
1219 VLOG(profiler) << "Time to save profile: " << std::to_string(total_time);
1220 return true;
1221 }
1222
1223 ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
1224 const std::string& profile_key,
1225 uint32_t checksum,
1226 uint32_t num_type_ids,
1227 uint32_t num_method_ids) {
1228 DCHECK_EQ(profile_key_map_.size(), info_.size());
1229 auto profile_index_it = profile_key_map_.lower_bound(profile_key);
1230 if (profile_index_it == profile_key_map_.end() || profile_index_it->first != profile_key) {
1231 // We did not find the key. Create a new DexFileData if we did not reach the limit.
1232 DCHECK_LE(profile_key_map_.size(), MaxProfileIndex());
1233 if (profile_key_map_.size() == MaxProfileIndex()) {
1234 // Allow only a limited number of dex files to be profiled. This allows us to save bytes
1235 // when encoding. For regular profiles this is 2^8, and for boot profiles it is 2^16
1236 // (well above what we expect for normal applications).
1237 LOG(ERROR) << "Exceeded the maximum number of dex files. Something went wrong";
1238 return nullptr;
1239 }
1240 ProfileIndexType new_profile_index = dchecked_integral_cast<ProfileIndexType>(info_.size());
1241 std::unique_ptr<DexFileData> dex_file_data(new (&allocator_) DexFileData(
1242 &allocator_,
1243 profile_key,
1244 checksum,
1245 new_profile_index,
1246 num_type_ids,
1247 num_method_ids,
1248 IsForBootImage()));
1249 // Record the new data in `profile_key_map_` and `info_`.
1250 std::string_view new_key(dex_file_data->profile_key);
1251 profile_index_it = profile_key_map_.PutBefore(profile_index_it, new_key, new_profile_index);
1252 info_.push_back(std::move(dex_file_data));
1253 DCHECK_EQ(profile_key_map_.size(), info_.size());
1254 }
1255
1256 ProfileIndexType profile_index = profile_index_it->second;
1257 DexFileData* result = info_[profile_index].get();
1258
1259 // Check that the checksum matches.
1260 // This may differ if, for example, the dex file was updated and we had a record of the old one.
1261 if (result->checksum != checksum) {
1262 LOG(WARNING) << "Checksum mismatch for dex " << profile_key;
1263 return nullptr;
1264 }
1265
1266 // DCHECK that profile info map key is consistent with the one stored in the dex file data.
1267 // This should always be the case since the cache map is managed by ProfileCompilationInfo.
1268 DCHECK_EQ(profile_key, result->profile_key);
1269 DCHECK_EQ(profile_index, result->profile_index);
1270
1271 if (num_type_ids != result->num_type_ids || num_method_ids != result->num_method_ids) {
1272 // This should not happen... added to help investigating b/65812889.
1273 LOG(ERROR) << "num_type_ids or num_method_ids mismatch for dex " << profile_key
1274 << ", types: expected=" << num_type_ids << " v. actual=" << result->num_type_ids
1275 << ", methods: expected=" << num_method_ids << " v. actual=" << result->num_method_ids;
1276 return nullptr;
1277 }
1278
1279 return result;
1280 }
1281
1282 const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
1283 const std::string& profile_key,
1284 uint32_t checksum,
1285 bool verify_checksum) const {
1286 const auto profile_index_it = profile_key_map_.find(profile_key);
1287 if (profile_index_it == profile_key_map_.end()) {
1288 return nullptr;
1289 }
1290
1291 ProfileIndexType profile_index = profile_index_it->second;
1292 const DexFileData* result = info_[profile_index].get();
1293 if (verify_checksum && !ChecksumMatch(result->checksum, checksum)) {
1294 return nullptr;
1295 }
1296 DCHECK_EQ(profile_key, result->profile_key);
1297 DCHECK_EQ(profile_index, result->profile_index);
1298 return result;
1299 }
1300
1301 const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexDataUsingAnnotations(
1302 const DexFile* dex_file,
1303 const ProfileSampleAnnotation& annotation) const {
1304 if (annotation == ProfileSampleAnnotation::kNone) {
1305 std::string_view profile_key = GetProfileDexFileBaseKeyView(dex_file->GetLocation());
1306 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1307 if (profile_key == GetBaseKeyViewFromAugmentedKey(dex_data->profile_key)) {
1308 if (!ChecksumMatch(dex_data->checksum, dex_file->GetLocationChecksum())) {
1309 return nullptr;
1310 }
1311 return dex_data.get();
1312 }
1313 }
1314 } else {
1315 std::string profile_key = GetProfileDexFileAugmentedKey(dex_file->GetLocation(), annotation);
1316 return FindDexData(profile_key, dex_file->GetLocationChecksum());
1317 }
1318
1319 return nullptr;
1320 }
1321
1322 void ProfileCompilationInfo::FindAllDexData(
1323 const DexFile* dex_file,
1324 /*out*/ std::vector<const ProfileCompilationInfo::DexFileData*>* result) const {
1325 std::string_view profile_key = GetProfileDexFileBaseKeyView(dex_file->GetLocation());
1326 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1327 if (profile_key == GetBaseKeyViewFromAugmentedKey(dex_data->profile_key)) {
1328 if (ChecksumMatch(dex_data->checksum, dex_file->GetLocationChecksum())) {
1329 result->push_back(dex_data.get());
1330 }
1331 }
1332 }
1333 }
1334
1335 ProfileCompilationInfo::ExtraDescriptorIndex ProfileCompilationInfo::AddExtraDescriptor(
1336 std::string_view extra_descriptor) {
1337 DCHECK_LE(extra_descriptor.size(), kMaxExtraDescriptorLength);
1338 DCHECK(extra_descriptors_indexes_.find(extra_descriptor) == extra_descriptors_indexes_.end());
1339 ExtraDescriptorIndex new_extra_descriptor_index = extra_descriptors_.size();
1340 DCHECK_LE(new_extra_descriptor_index, kMaxExtraDescriptors);
1341 if (UNLIKELY(new_extra_descriptor_index == kMaxExtraDescriptors)) {
1342 return kMaxExtraDescriptors; // Cannot add another extra descriptor.
1343 }
1344 // Add the extra descriptor and record the new index.
1345 extra_descriptors_.emplace_back(extra_descriptor);
1346 extra_descriptors_indexes_.insert(new_extra_descriptor_index);
1347 return new_extra_descriptor_index;
1348 }
1349
1350 bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi,
1351 MethodHotness::Flag flags,
1352 const ProfileSampleAnnotation& annotation,
1353 bool is_test) {
1354 DexFileData* const data = GetOrAddDexFileData(pmi.ref.dex_file, annotation);
1355 if (data == nullptr) { // checksum mismatch
1356 return false;
1357 }
1358 if (!data->AddMethod(flags, pmi.ref.index)) {
1359 return false;
1360 }
1361 if ((flags & MethodHotness::kFlagHot) == 0) {
1362 // The method is not hot, do not add inline caches.
1363 return true;
1364 }
1365
1366 // Add inline caches.
1367 InlineCacheMap* inline_cache = data->FindOrAddHotMethod(pmi.ref.index);
1368 DCHECK(inline_cache != nullptr);
1369
1370 const dex::MethodId& mid = pmi.ref.GetMethodId();
1371 const DexFile& dex_file = *pmi.ref.dex_file;
1372 const dex::ClassDef* class_def = dex_file.FindClassDef(mid.class_idx_);
1373 // If `is_test` is true, we don't check whether the dex_pc fits within the
1374 // code item of that method.
1375 uint32_t dex_pc_max = 0u;
1376 if (is_test) {
1377 dex_pc_max = std::numeric_limits<uint32_t>::max();
1378 } else {
1379 if (class_def == nullptr || dex_file.GetClassData(*class_def) == nullptr) {
1380 return true;
1381 }
1382 std::optional<uint32_t> offset = dex_file.GetCodeItemOffset(*class_def, pmi.ref.index);
1383 if (!offset.has_value()) {
1384 return true;
1385 }
1386 CodeItemInstructionAccessor accessor(dex_file, dex_file.GetCodeItem(offset.value()));
1387 dex_pc_max = accessor.InsnsSizeInCodeUnits();
1388 }
1389
1390 for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
1391 if (cache.dex_pc >= std::numeric_limits<uint16_t>::max()) {
1392 // Discard entries that don't fit the encoding. This should only apply to
1393 // inlined inline caches. See also `HInliner::GetInlineCacheAOT`.
1394 continue;
1395 }
1396 if (cache.dex_pc >= dex_pc_max) {
1397 // Discard entries for inlined inline caches. We don't support them in
1398 // profiles yet.
1399 continue;
1400 }
1401 if (cache.is_missing_types) {
1402 FindOrAddDexPc(inline_cache, cache.dex_pc)->SetIsMissingTypes();
1403 continue;
1404 }
1405 if (cache.is_megamorphic) {
1406 FindOrAddDexPc(inline_cache, cache.dex_pc)->SetIsMegamorphic();
1407 continue;
1408 }
1409 for (const TypeReference& class_ref : cache.classes) {
1410 DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, cache.dex_pc);
1411 if (dex_pc_data->is_missing_types || dex_pc_data->is_megamorphic) {
1412 // Don't bother adding classes if we are missing types or already megamorphic.
1413 break;
1414 }
1415 dex::TypeIndex type_index = FindOrCreateTypeIndex(*pmi.ref.dex_file, class_ref);
1416 if (type_index.IsValid()) {
1417 dex_pc_data->AddClass(type_index);
1418 } else {
1419 // Could not create artificial type index.
1420 dex_pc_data->SetIsMissingTypes();
1421 }
1422 }
1423 }
1424 return true;
1425 }
1426
1427 // TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
1428 // return a unique pointer to a ProfileCompilationInfo upon success.
1429 bool ProfileCompilationInfo::Load(
1430 int fd, bool merge_classes, const ProfileLoadFilterFn& filter_fn) {
1431 std::string error;
1432
1433 ProfileLoadStatus status = LoadInternal(fd, &error, merge_classes, filter_fn);
1434
1435 if (status == ProfileLoadStatus::kSuccess) {
1436 return true;
1437 } else {
1438 LOG(WARNING) << "Error when reading profile: " << error;
1439 return false;
1440 }
1441 }
1442
1443 bool ProfileCompilationInfo::VerifyProfileData(const std::vector<const DexFile*>& dex_files) {
1444 std::unordered_map<std::string_view, const DexFile*> key_to_dex_file;
1445 for (const DexFile* dex_file : dex_files) {
1446 key_to_dex_file.emplace(GetProfileDexFileBaseKeyView(dex_file->GetLocation()), dex_file);
1447 }
1448 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
1449 // We need to remove any annotation from the key during verification.
1450 const auto it = key_to_dex_file.find(GetBaseKeyViewFromAugmentedKey(dex_data->profile_key));
1451 if (it == key_to_dex_file.end()) {
1452       // It is okay if the profile contains data for additional dex files.
1453 continue;
1454 }
1455 const DexFile* dex_file = it->second;
1456 const std::string& dex_location = dex_file->GetLocation();
1457 if (!ChecksumMatch(dex_data->checksum, dex_file->GetLocationChecksum())) {
1458 LOG(ERROR) << "Dex checksum mismatch while verifying profile "
1459 << "dex location " << dex_location << " (checksum="
1460 << dex_file->GetLocationChecksum() << ", profile checksum="
1461                  << dex_data->checksum << ")";
1462 return false;
1463 }
1464
1465 if (dex_data->num_method_ids != dex_file->NumMethodIds() ||
1466 dex_data->num_type_ids != dex_file->NumTypeIds()) {
1467 LOG(ERROR) << "Number of type or method ids in dex file and profile don't match."
1468 << "dex location " << dex_location
1469 << " dex_file.NumTypeIds=" << dex_file->NumTypeIds()
1470                  << " v. dex_data.num_type_ids=" << dex_data->num_type_ids
1471 << ", dex_file.NumMethodIds=" << dex_file->NumMethodIds()
1472 << " v. dex_data.num_method_ids=" << dex_data->num_method_ids;
1473 return false;
1474 }
1475
1476 // Class and method data should be valid. Verify only in debug builds.
1477 if (kIsDebugBuild) {
1478 // Verify method_encoding.
1479 for (const auto& method_it : dex_data->method_map) {
1480 CHECK_LT(method_it.first, dex_data->num_method_ids);
1481
1482 // Verify class indices of inline caches.
1483 const InlineCacheMap &inline_cache_map = method_it.second;
1484 for (const auto& inline_cache_it : inline_cache_map) {
1485 const DexPcData& dex_pc_data = inline_cache_it.second;
1486 if (dex_pc_data.is_missing_types || dex_pc_data.is_megamorphic) {
1487 // No class indices to verify.
1488 CHECK(dex_pc_data.classes.empty());
1489 continue;
1490 }
1491
1492 for (const dex::TypeIndex& type_index : dex_pc_data.classes) {
1493 if (type_index.index_ >= dex_data->num_type_ids) {
1494 CHECK_LT(type_index.index_ - dex_data->num_type_ids, extra_descriptors_.size());
1495 }
1496 }
1497 }
1498 }
1499 // Verify class_ids.
1500 for (const dex::TypeIndex& type_index : dex_data->class_set) {
1501 if (type_index.index_ >= dex_data->num_type_ids) {
1502 CHECK_LT(type_index.index_ - dex_data->num_type_ids, extra_descriptors_.size());
1503 }
1504 }
1505 }
1506 }
1507 return true;
1508 }
1509
1510 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
1511 int32_t fd,
1512 /*out*/ std::unique_ptr<ProfileSource>* source,
1513 /*out*/ std::string* error) {
1514 if (IsProfileFile(fd)) {
1515 source->reset(ProfileSource::Create(fd));
1516 return ProfileLoadStatus::kSuccess;
1517 } else {
1518 std::unique_ptr<ZipArchive> zip_archive(
1519 ZipArchive::OpenFromFd(DupCloexec(fd), "profile", error));
1520 if (zip_archive.get() == nullptr) {
1521 *error = "Could not open the profile zip archive";
1522 return ProfileLoadStatus::kBadData;
1523 }
1524 std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(kDexMetadataProfileEntry, error));
1525 if (zip_entry == nullptr) {
1526 // Allow archives without the profile entry. In this case, create an empty profile.
1527       // This gives more flexibility when re-using archives that may miss the entry
1528 // (e.g. dex metadata files)
1529 LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
1530 << " in the zip archive. Creating an empty profile.";
1531 source->reset(ProfileSource::Create(MemMap::Invalid()));
1532 return ProfileLoadStatus::kSuccess;
1533 }
1534 if (zip_entry->GetUncompressedLength() == 0) {
1535 *error = "Empty profile entry in the zip archive.";
1536 return ProfileLoadStatus::kBadData;
1537 }
1538
1539 // TODO(calin) pass along file names to assist with debugging.
1540 MemMap map = zip_entry->MapDirectlyOrExtract(
1541 kDexMetadataProfileEntry, "profile file", error, alignof(ProfileSource));
1542
1543 if (map.IsValid()) {
1544 source->reset(ProfileSource::Create(std::move(map)));
1545 return ProfileLoadStatus::kSuccess;
1546 } else {
1547 return ProfileLoadStatus::kBadData;
1548 }
1549 }
1550 }
1551
1552 bool ProfileCompilationInfo::ProfileSource::Seek(off_t offset) {
1553 DCHECK_GE(offset, 0);
1554 if (IsMemMap()) {
1555 if (offset > static_cast<int64_t>(mem_map_.Size())) {
1556 return false;
1557 }
1558 mem_map_cur_ = offset;
1559 return true;
1560 } else {
1561 if (lseek64(fd_, offset, SEEK_SET) != offset) {
1562 return false;
1563 }
1564 return true;
1565 }
1566 }
1567
1568 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource::Read(
1569 void* buffer,
1570 size_t byte_count,
1571 const std::string& debug_stage,
1572 std::string* error) {
1573 if (IsMemMap()) {
1574 DCHECK_LE(mem_map_cur_, mem_map_.Size());
1575 if (byte_count > mem_map_.Size() - mem_map_cur_) {
1576 return ProfileLoadStatus::kBadData;
1577 }
1578 memcpy(buffer, mem_map_.Begin() + mem_map_cur_, byte_count);
1579 mem_map_cur_ += byte_count;
1580 } else {
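    // File-descriptor-backed source: read in a loop, retrying on EINTR via
    // TEMP_FAILURE_RETRY and advancing the buffer on short reads until all
    // `byte_count` bytes have been consumed.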
1581 while (byte_count > 0) {
1582       int bytes_read = TEMP_FAILURE_RETRY(read(fd_, buffer, byte_count));
1583 if (bytes_read == 0) {
1584 *error += "Profile EOF reached prematurely for " + debug_stage;
1585 return ProfileLoadStatus::kBadData;
1586 } else if (bytes_read < 0) {
1587 *error += "Profile IO error for " + debug_stage + strerror(errno);
1588 return ProfileLoadStatus::kIOError;
1589 }
1590 byte_count -= bytes_read;
1591 reinterpret_cast<uint8_t*&>(buffer) += bytes_read;
1592 }
1593 }
1594 return ProfileLoadStatus::kSuccess;
1595 }
1596
1597
1598 bool ProfileCompilationInfo::ProfileSource::HasEmptyContent() const {
1599 if (IsMemMap()) {
1600 return !mem_map_.IsValid() || mem_map_.Size() == 0;
1601 } else {
1602 struct stat stat_buffer;
1603 if (fstat(fd_, &stat_buffer) != 0) {
1604 return false;
1605 }
1606 return stat_buffer.st_size == 0;
1607 }
1608 }
1609
1610 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadSectionData(
1611 ProfileSource& source,
1612 const FileSectionInfo& section_info,
1613 /*out*/ SafeBuffer* buffer,
1614 /*out*/ std::string* error) {
1615 DCHECK_EQ(buffer->Size(), 0u);
1616 if (!source.Seek(section_info.GetFileOffset())) {
1617 *error = "Failed to seek to section data.";
1618 return ProfileLoadStatus::kIOError;
1619 }
1620 SafeBuffer temp_buffer(section_info.GetFileSize());
1621 ProfileLoadStatus status = source.Read(
1622 temp_buffer.GetCurrentPtr(), temp_buffer.GetAvailableBytes(), "ReadSectionData", error);
1623 if (status != ProfileLoadStatus::kSuccess) {
1624 return status;
1625 }
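  // A non-zero inflated size presumably indicates that the section is stored
  // deflate-compressed on disk; inflate it to its expected in-memory size before use.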
1626 if (section_info.GetInflatedSize() != 0u &&
1627 !temp_buffer.Inflate(section_info.GetInflatedSize())) {
1628 *error += "Error uncompressing section data.";
1629 return ProfileLoadStatus::kBadData;
1630 }
1631 buffer->Swap(temp_buffer);
1632 return ProfileLoadStatus::kSuccess;
1633 }
1634
1635 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadDexFilesSection(
1636 ProfileSource& source,
1637 const FileSectionInfo& section_info,
1638 const ProfileLoadFilterFn& filter_fn,
1639 /*out*/ dchecked_vector<ProfileIndexType>* dex_profile_index_remap,
1640 /*out*/ std::string* error) {
1641 DCHECK(section_info.GetType() == FileSectionType::kDexFiles);
1642 SafeBuffer buffer;
1643 ProfileLoadStatus status = ReadSectionData(source, section_info, &buffer, error);
1644 if (status != ProfileLoadStatus::kSuccess) {
1645 return status;
1646 }
1647
1648 ProfileIndexType num_dex_files;
1649 if (!buffer.ReadUintAndAdvance(&num_dex_files)) {
1650 *error = "Error reading number of dex files.";
1651 return ProfileLoadStatus::kBadData;
1652 }
1653 if (num_dex_files >= MaxProfileIndex()) {
1654 *error = "Too many dex files.";
1655 return ProfileLoadStatus::kBadData;
1656 }
1657
1658 DCHECK(dex_profile_index_remap->empty());
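  // `dex_profile_index_remap` maps each dex file's position in this section (its on-disk
  // profile index) to its profile index in this ProfileCompilationInfo; filtered-out
  // entries are mapped to MaxProfileIndex() so that later sections can skip their data.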
1659 for (ProfileIndexType i = 0u; i != num_dex_files; ++i) {
1660 uint32_t checksum, num_type_ids, num_method_ids;
1661 if (!buffer.ReadUintAndAdvance(&checksum) ||
1662 !buffer.ReadUintAndAdvance(&num_type_ids) ||
1663 !buffer.ReadUintAndAdvance(&num_method_ids)) {
1664 *error = "Error reading dex file data.";
1665 return ProfileLoadStatus::kBadData;
1666 }
1667 std::string_view profile_key_view;
1668 if (!buffer.ReadStringAndAdvance(&profile_key_view)) {
1669 *error += "Missing terminating null character for profile key.";
1670 return ProfileLoadStatus::kBadData;
1671 }
1672 if (profile_key_view.size() == 0u || profile_key_view.size() > kMaxDexFileKeyLength) {
1673 *error = "ProfileKey has an invalid size: " + std::to_string(profile_key_view.size());
1674 return ProfileLoadStatus::kBadData;
1675 }
1676 std::string profile_key(profile_key_view);
1677 if (!filter_fn(profile_key, checksum)) {
1678 // Do not load data for this key. Store invalid index to `dex_profile_index_remap`.
1679 VLOG(compiler) << "Profile: Filtered out " << profile_key << " 0x" << std::hex << checksum;
1680 dex_profile_index_remap->push_back(MaxProfileIndex());
1681 continue;
1682 }
1683 DexFileData* data = GetOrAddDexFileData(profile_key, checksum, num_type_ids, num_method_ids);
1684 if (data == nullptr) {
1685 if (UNLIKELY(profile_key_map_.size() == MaxProfileIndex()) &&
1686 profile_key_map_.find(profile_key) == profile_key_map_.end()) {
1687 *error = "Too many dex files.";
1688 } else {
1689 *error = "Checksum, NumTypeIds, or NumMethodIds mismatch for " + profile_key;
1690 }
1691 return ProfileLoadStatus::kBadData;
1692 }
1693 dex_profile_index_remap->push_back(data->profile_index);
1694 }
1695 if (buffer.GetAvailableBytes() != 0u) {
1696 *error = "Unexpected data at end of dex files section.";
1697 return ProfileLoadStatus::kBadData;
1698 }
1699 return ProfileLoadStatus::kSuccess;
1700 }
1701
1702 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadExtraDescriptorsSection(
1703 ProfileSource& source,
1704 const FileSectionInfo& section_info,
1705 /*out*/ dchecked_vector<ExtraDescriptorIndex>* extra_descriptors_remap,
1706 /*out*/ std::string* error) {
1707 DCHECK(section_info.GetType() == FileSectionType::kExtraDescriptors);
1708 SafeBuffer buffer;
1709 ProfileLoadStatus status = ReadSectionData(source, section_info, &buffer, error);
1710 if (status != ProfileLoadStatus::kSuccess) {
1711 return status;
1712 }
1713
1714 uint16_t num_extra_descriptors;
1715 if (!buffer.ReadUintAndAdvance(&num_extra_descriptors)) {
1716 *error = "Error reading number of extra descriptors.";
1717 return ProfileLoadStatus::kBadData;
1718 }
1719
1720 // Note: We allow multiple extra descriptors sections in a single profile file
1721 // but that can lead to `kMergeError` if there are too many extra descriptors.
1722 // Other sections can reference only extra descriptors from preceding sections.
1723 extra_descriptors_remap->reserve(
1724 std::min<size_t>(extra_descriptors_remap->size() + num_extra_descriptors,
1725 std::numeric_limits<uint16_t>::max()));
1726 for (uint16_t i = 0; i != num_extra_descriptors; ++i) {
1727 std::string_view extra_descriptor;
1728 if (!buffer.ReadStringAndAdvance(&extra_descriptor)) {
1729 *error += "Missing terminating null character for extra descriptor.";
1730 return ProfileLoadStatus::kBadData;
1731 }
1732 if (!IsValidDescriptor(std::string(extra_descriptor).c_str())) {
1733 *error += "Invalid extra descriptor.";
1734 return ProfileLoadStatus::kBadData;
1735 }
1736 // Try to match an existing extra descriptor.
1737 auto it = extra_descriptors_indexes_.find(extra_descriptor);
1738 if (it != extra_descriptors_indexes_.end()) {
1739 extra_descriptors_remap->push_back(*it);
1740 continue;
1741 }
1742 // Try to insert a new extra descriptor.
1743 ExtraDescriptorIndex extra_descriptor_index = AddExtraDescriptor(extra_descriptor);
1744 if (extra_descriptor_index == kMaxExtraDescriptors) {
1745 *error = "Too many extra descriptors.";
1746 return ProfileLoadStatus::kMergeError;
1747 }
1748 extra_descriptors_remap->push_back(extra_descriptor_index);
1749 }
1750 return ProfileLoadStatus::kSuccess;
1751 }
1752
1753 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadClassesSection(
1754 ProfileSource& source,
1755 const FileSectionInfo& section_info,
1756 const dchecked_vector<ProfileIndexType>& dex_profile_index_remap,
1757 const dchecked_vector<ExtraDescriptorIndex>& extra_descriptors_remap,
1758 /*out*/ std::string* error) {
1759 DCHECK(section_info.GetType() == FileSectionType::kClasses);
1760 SafeBuffer buffer;
1761 ProfileLoadStatus status = ReadSectionData(source, section_info, &buffer, error);
1762 if (status != ProfileLoadStatus::kSuccess) {
1763 return status;
1764 }
1765
1766 while (buffer.GetAvailableBytes() != 0u) {
1767 ProfileIndexType profile_index;
1768 if (!buffer.ReadUintAndAdvance(&profile_index)) {
1769 *error = "Error profile index in classes section.";
1770 return ProfileLoadStatus::kBadData;
1771 }
1772 if (profile_index >= dex_profile_index_remap.size()) {
1773 *error = "Invalid profile index in classes section.";
1774 return ProfileLoadStatus::kBadData;
1775 }
1776 profile_index = dex_profile_index_remap[profile_index];
1777 if (profile_index == MaxProfileIndex()) {
1778 status = DexFileData::SkipClasses(buffer, error);
1779 } else {
1780 status = info_[profile_index]->ReadClasses(buffer, extra_descriptors_remap, error);
1781 }
1782 if (status != ProfileLoadStatus::kSuccess) {
1783 return status;
1784 }
1785 }
1786 return ProfileLoadStatus::kSuccess;
1787 }
1788
1789 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadMethodsSection(
1790 ProfileSource& source,
1791 const FileSectionInfo& section_info,
1792 const dchecked_vector<ProfileIndexType>& dex_profile_index_remap,
1793 const dchecked_vector<ExtraDescriptorIndex>& extra_descriptors_remap,
1794 /*out*/ std::string* error) {
1795 DCHECK(section_info.GetType() == FileSectionType::kMethods);
1796 SafeBuffer buffer;
1797 ProfileLoadStatus status = ReadSectionData(source, section_info, &buffer, error);
1798 if (status != ProfileLoadStatus::kSuccess) {
1799 return status;
1800 }
1801
1802 while (buffer.GetAvailableBytes() != 0u) {
1803 ProfileIndexType profile_index;
1804 if (!buffer.ReadUintAndAdvance(&profile_index)) {
1805 *error = "Error profile index in methods section.";
1806 return ProfileLoadStatus::kBadData;
1807 }
1808 if (profile_index >= dex_profile_index_remap.size()) {
1809 *error = "Invalid profile index in methods section.";
1810 return ProfileLoadStatus::kBadData;
1811 }
1812 profile_index = dex_profile_index_remap[profile_index];
1813 if (profile_index == MaxProfileIndex()) {
1814 status = DexFileData::SkipMethods(buffer, error);
1815 } else {
1816 status = info_[profile_index]->ReadMethods(buffer, extra_descriptors_remap, error);
1817 }
1818 if (status != ProfileLoadStatus::kSuccess) {
1819 return status;
1820 }
1821 }
1822 return ProfileLoadStatus::kSuccess;
1823 }
1824
1825 // TODO(calin): fail fast if the dex checksums don't match.
1826 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::LoadInternal(
1827 int32_t fd,
1828 std::string* error,
1829 bool merge_classes,
1830 const ProfileLoadFilterFn& filter_fn) {
1831 ScopedTrace trace(__PRETTY_FUNCTION__);
1832 DCHECK_GE(fd, 0);
1833
1834 std::unique_ptr<ProfileSource> source;
1835 ProfileLoadStatus status = OpenSource(fd, &source, error);
1836 if (status != ProfileLoadStatus::kSuccess) {
1837 return status;
1838 }
1839
1840 // We allow empty profile files.
1841 // Profiles may be created by ActivityManager or installd before we manage to
1842 // process them in the runtime or profman.
1843 if (source->HasEmptyContent()) {
1844 return ProfileLoadStatus::kSuccess;
1845 }
1846
1847 // Read file header.
1848 FileHeader header;
1849 status = source->Read(&header, sizeof(FileHeader), "ReadProfileHeader", error);
1850 if (status != ProfileLoadStatus::kSuccess) {
1851 return status;
1852 }
1853 if (!header.IsValid()) {
1854 return header.InvalidHeaderMessage(error);
1855 }
1856 if (memcmp(header.GetVersion(), version_, kProfileVersionSize) != 0) {
1857 *error = IsForBootImage() ? "Expected boot profile, got app profile."
1858 : "Expected app profile, got boot profile.";
1859 return ProfileLoadStatus::kVersionMismatch;
1860 }
1861
1862 // Check if there are too many section infos.
1863 uint32_t section_count = header.GetFileSectionCount();
1864 uint32_t uncompressed_data_size = sizeof(FileHeader) + section_count * sizeof(FileSectionInfo);
1865 if (uncompressed_data_size > GetSizeErrorThresholdBytes()) {
1866 LOG(WARNING) << "Profile data size exceeds " << GetSizeErrorThresholdBytes()
1867 << " bytes. It has " << uncompressed_data_size << " bytes.";
1868 return ProfileLoadStatus::kBadData;
1869 }
1870
1871 // Read section infos.
1872 dchecked_vector<FileSectionInfo> section_infos(section_count);
1873 status = source->Read(
1874 section_infos.data(), section_count * sizeof(FileSectionInfo), "ReadSectionInfos", error);
1875 if (status != ProfileLoadStatus::kSuccess) {
1876 return status;
1877 }
1878
1879 // Finish uncompressed data size calculation.
1880 for (const FileSectionInfo& section_info : section_infos) {
1881 uint32_t mem_size = section_info.GetMemSize();
1882 if (UNLIKELY(mem_size > std::numeric_limits<uint32_t>::max() - uncompressed_data_size)) {
1883 *error = "Total memory size overflow.";
1884 return ProfileLoadStatus::kBadData;
1885 }
1886 uncompressed_data_size += mem_size;
1887 }
1888
1889 // Allow large profiles for non target builds for the case where we are merging many profiles
1890 // to generate a boot image profile.
1891 if (uncompressed_data_size > GetSizeErrorThresholdBytes()) {
1892 LOG(WARNING) << "Profile data size exceeds "
1893 << GetSizeErrorThresholdBytes()
1894 << " bytes. It has " << uncompressed_data_size << " bytes.";
1895 return ProfileLoadStatus::kBadData;
1896 }
1897 if (uncompressed_data_size > GetSizeWarningThresholdBytes()) {
1898 LOG(WARNING) << "Profile data size exceeds "
1899 << GetSizeWarningThresholdBytes()
1900 << " bytes. It has " << uncompressed_data_size << " bytes.";
1901 }
1902
1903 // Process the mandatory dex files section.
1904 DCHECK_NE(section_count, 0u); // Checked by `header.IsValid()` above.
1905 const FileSectionInfo& dex_files_section_info = section_infos[0];
1906 if (dex_files_section_info.GetType() != FileSectionType::kDexFiles) {
1907 *error = "First section is not dex files section.";
1908 return ProfileLoadStatus::kBadData;
1909 }
1910 dchecked_vector<ProfileIndexType> dex_profile_index_remap;
1911 status = ReadDexFilesSection(
1912 *source, dex_files_section_info, filter_fn, &dex_profile_index_remap, error);
1913 if (status != ProfileLoadStatus::kSuccess) {
1914 DCHECK(!error->empty());
1915 return status;
1916 }
1917
1918 // Process all other sections.
1919 dchecked_vector<ExtraDescriptorIndex> extra_descriptors_remap;
1920 for (uint32_t i = 1u; i != section_count; ++i) {
1921 const FileSectionInfo& section_info = section_infos[i];
1922 DCHECK(status == ProfileLoadStatus::kSuccess);
1923 switch (section_info.GetType()) {
1924 case FileSectionType::kDexFiles:
1925 *error = "Unsupported additional dex files section.";
1926 status = ProfileLoadStatus::kBadData;
1927 break;
1928 case FileSectionType::kExtraDescriptors:
1929 status = ReadExtraDescriptorsSection(
1930 *source, section_info, &extra_descriptors_remap, error);
1931 break;
1932 case FileSectionType::kClasses:
1933 // Skip if all dex files were filtered out.
1934 if (!info_.empty() && merge_classes) {
1935 status = ReadClassesSection(
1936 *source, section_info, dex_profile_index_remap, extra_descriptors_remap, error);
1937 }
1938 break;
1939 case FileSectionType::kMethods:
1940 // Skip if all dex files were filtered out.
1941 if (!info_.empty()) {
1942 status = ReadMethodsSection(
1943 *source, section_info, dex_profile_index_remap, extra_descriptors_remap, error);
1944 }
1945 break;
1946 case FileSectionType::kAggregationCounts:
1947 // This section is only used on server side.
1948 break;
1949 default:
1950 // Unknown section. Skip it. New versions of ART are allowed
1951 // to add sections that shall be ignored by old versions.
1952 break;
1953 }
1954 if (status != ProfileLoadStatus::kSuccess) {
1955 DCHECK(!error->empty());
1956 return status;
1957 }
1958 }
1959
1960 return ProfileLoadStatus::kSuccess;
1961 }
1962
1963 bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other,
1964 bool merge_classes) {
1965 if (!SameVersion(other)) {
1966 LOG(WARNING) << "Cannot merge different profile versions";
1967 return false;
1968 }
1969
1970 // First verify that all checksums match. This will avoid adding garbage to
1971 // the current profile info.
1972 // Note that the number of elements should be very small, so this should not
1973 // be a performance issue.
1974 for (const std::unique_ptr<DexFileData>& other_dex_data : other.info_) {
1975 // verify_checksum is false because we want to differentiate between a missing dex data and
1976 // a mismatched checksum.
1977 const DexFileData* dex_data = FindDexData(other_dex_data->profile_key,
1978 /* checksum= */ 0u,
1979 /* verify_checksum= */ false);
1980 if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
1981 LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
1982 return false;
1983 }
1984 }
1985 // All checksums match. Import the data.
1986
1987 // The other profile might have a different indexing of dex files.
1988   // That is because each dex file gets a 'dex_profile_index' on a first come first served basis.
1989   // That means that the order in which the methods are added to the profile matters for the
1990 // actual indices.
1991 // The reason we cannot rely on the actual multidex index is that a single profile may store
1992 // data from multiple splits. This means that a profile may contain a classes2.dex from split-A
1993 // and one from split-B.
1994
1995 // First, build a mapping from other_dex_profile_index to this_dex_profile_index.
1996 dchecked_vector<ProfileIndexType> dex_profile_index_remap;
1997 dex_profile_index_remap.reserve(other.info_.size());
1998 for (const std::unique_ptr<DexFileData>& other_dex_data : other.info_) {
1999 const DexFileData* dex_data = GetOrAddDexFileData(other_dex_data->profile_key,
2000 other_dex_data->checksum,
2001 other_dex_data->num_type_ids,
2002 other_dex_data->num_method_ids);
2003 if (dex_data == nullptr) {
2004 // Could happen if we exceed the number of allowed dex files or there is
2005 // a mismatch in `num_type_ids` or `num_method_ids`.
2006 return false;
2007 }
2008 DCHECK_EQ(other_dex_data->profile_index, dex_profile_index_remap.size());
2009 dex_profile_index_remap.push_back(dex_data->profile_index);
2010 }
2011
2012 // Then merge extra descriptors.
2013 dchecked_vector<ExtraDescriptorIndex> extra_descriptors_remap;
2014 extra_descriptors_remap.reserve(other.extra_descriptors_.size());
2015 for (const std::string& other_extra_descriptor : other.extra_descriptors_) {
2016 auto it = extra_descriptors_indexes_.find(std::string_view(other_extra_descriptor));
2017 if (it != extra_descriptors_indexes_.end()) {
2018 extra_descriptors_remap.push_back(*it);
2019 } else {
2020 ExtraDescriptorIndex extra_descriptor_index = AddExtraDescriptor(other_extra_descriptor);
2021 if (extra_descriptor_index == kMaxExtraDescriptors) {
2022 // Too many extra descriptors.
2023 return false;
2024 }
2025 extra_descriptors_remap.push_back(extra_descriptor_index);
2026 }
2027 }
2028
2029 // Merge the actual profile data.
2030 for (const std::unique_ptr<DexFileData>& other_dex_data : other.info_) {
2031 DexFileData* dex_data = info_[dex_profile_index_remap[other_dex_data->profile_index]].get();
2032 DCHECK_EQ(dex_data, FindDexData(other_dex_data->profile_key, other_dex_data->checksum));
2033
2034 // Merge the classes.
2035 uint32_t num_type_ids = dex_data->num_type_ids;
2036 DCHECK_EQ(num_type_ids, other_dex_data->num_type_ids);
2037 if (merge_classes) {
2038 // Classes are ordered by the `TypeIndex`, so we have the classes with a `TypeId`
2039 // in the dex file first, followed by classes using extra descriptors.
2040 auto it = other_dex_data->class_set.lower_bound(dex::TypeIndex(num_type_ids));
2041 dex_data->class_set.insert(other_dex_data->class_set.begin(), it);
2042 for (auto end = other_dex_data->class_set.end(); it != end; ++it) {
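        // Class entries at or above `num_type_ids` reference extra descriptors rather than
        // `TypeId`s in the dex file; translate them through `extra_descriptors_remap` built above.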
2043 ExtraDescriptorIndex new_extra_descriptor_index =
2044 extra_descriptors_remap[it->index_ - num_type_ids];
2045 if (new_extra_descriptor_index >= DexFile::kDexNoIndex16 - num_type_ids) {
2046 // Cannot represent the type with new extra descriptor index.
2047 return false;
2048 }
2049 dex_data->class_set.insert(dex::TypeIndex(num_type_ids + new_extra_descriptor_index));
2050 }
2051 }
2052
2053 // Merge the methods and the inline caches.
2054 for (const auto& other_method_it : other_dex_data->method_map) {
2055 uint16_t other_method_index = other_method_it.first;
2056 InlineCacheMap* inline_cache = dex_data->FindOrAddHotMethod(other_method_index);
2057 if (inline_cache == nullptr) {
2058 return false;
2059 }
2060 const auto& other_inline_cache = other_method_it.second;
2061 for (const auto& other_ic_it : other_inline_cache) {
2062 uint16_t other_dex_pc = other_ic_it.first;
2063 const ArenaSet<dex::TypeIndex>& other_class_set = other_ic_it.second.classes;
2064 DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, other_dex_pc);
2065 if (other_ic_it.second.is_missing_types) {
2066 dex_pc_data->SetIsMissingTypes();
2067 } else if (other_ic_it.second.is_megamorphic) {
2068 dex_pc_data->SetIsMegamorphic();
2069 } else {
2070 for (dex::TypeIndex type_index : other_class_set) {
2071 if (type_index.index_ >= num_type_ids) {
2072 ExtraDescriptorIndex new_extra_descriptor_index =
2073 extra_descriptors_remap[type_index.index_ - num_type_ids];
2074 if (new_extra_descriptor_index >= DexFile::kDexNoIndex16 - num_type_ids) {
2075 // Cannot represent the type with new extra descriptor index.
2076 return false;
2077 }
2078 type_index = dex::TypeIndex(num_type_ids + new_extra_descriptor_index);
2079 }
2080 dex_pc_data->AddClass(type_index);
2081 }
2082 }
2083 }
2084 }
2085
2086 // Merge the method bitmaps.
2087 dex_data->MergeBitmap(*other_dex_data);
2088 }
2089
2090 return true;
2091 }
2092
2093 ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::GetMethodHotness(
2094 const MethodReference& method_ref,
2095 const ProfileSampleAnnotation& annotation) const {
2096 const DexFileData* dex_data = FindDexDataUsingAnnotations(method_ref.dex_file, annotation);
2097 return dex_data != nullptr
2098 ? dex_data->GetHotnessInfo(method_ref.index)
2099 : MethodHotness();
2100 }
2101
2102 bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file,
2103 dex::TypeIndex type_idx,
2104 const ProfileSampleAnnotation& annotation) const {
2105 const DexFileData* dex_data = FindDexDataUsingAnnotations(&dex_file, annotation);
2106 return (dex_data != nullptr) && dex_data->ContainsClass(type_idx);
2107 }
2108
2109 uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
2110 uint32_t total = 0;
2111 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
2112 total += dex_data->method_map.size();
2113 }
2114 return total;
2115 }
2116
2117 uint32_t ProfileCompilationInfo::GetNumberOfResolvedClasses() const {
2118 uint32_t total = 0;
2119 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
2120 total += dex_data->class_set.size();
2121 }
2122 return total;
2123 }
2124
2125 std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>& dex_files,
2126 bool print_full_dex_location) const {
2127 std::ostringstream os;
2128
2129 os << "ProfileInfo [";
2130
2131 for (size_t k = 0; k < kProfileVersionSize - 1; k++) {
2132 // Iterate to 'kProfileVersionSize - 1' because the version_ ends with '\0'
2133 // which we don't want to print.
2134 os << static_cast<char>(version_[k]);
2135 }
2136 os << "]\n";
2137
2138 if (info_.empty()) {
2139 os << "-empty-";
2140 return os.str();
2141 }
2142
2143 if (!extra_descriptors_.empty()) {
2144 os << "\nextra descriptors:";
2145 for (const std::string& str : extra_descriptors_) {
2146 os << "\n\t" << str;
2147 }
2148 os << "\n";
2149 }
2150
2151 const std::string kFirstDexFileKeySubstitute = "!classes.dex";
2152
2153 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
2154 os << "\n";
2155 if (print_full_dex_location) {
2156 os << dex_data->profile_key;
2157 } else {
2158 // Replace the (empty) multidex suffix of the first key with a substitute for easier reading.
2159 std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(
2160 GetBaseKeyFromAugmentedKey(dex_data->profile_key));
2161 os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
2162 }
2163 os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
2164 os << " [checksum=" << std::hex << dex_data->checksum << "]" << std::dec;
2165 os << " [num_type_ids=" << dex_data->num_type_ids << "]";
2166 os << " [num_method_ids=" << dex_data->num_method_ids << "]";
2167 const DexFile* dex_file = nullptr;
2168 for (const DexFile* current : dex_files) {
2169 if (GetBaseKeyViewFromAugmentedKey(dex_data->profile_key) ==
2170 GetProfileDexFileBaseKeyView(current->GetLocation()) &&
2171 ChecksumMatch(dex_data->checksum, current->GetLocationChecksum())) {
2172 dex_file = current;
2173 break;
2174 }
2175 }
2176 os << "\n\thot methods: ";
2177 for (const auto& method_it : dex_data->method_map) {
2178 if (dex_file != nullptr) {
2179 os << "\n\t\t" << dex_file->PrettyMethod(method_it.first, true);
2180 } else {
2181 os << method_it.first;
2182 }
2183
2184 os << "[";
2185 for (const auto& inline_cache_it : method_it.second) {
2186 os << "{" << std::hex << inline_cache_it.first << std::dec << ":";
2187 if (inline_cache_it.second.is_missing_types) {
2188 os << "MT";
2189 } else if (inline_cache_it.second.is_megamorphic) {
2190 os << "MM";
2191 } else {
2192 const char* separator = "";
2193 for (dex::TypeIndex type_index : inline_cache_it.second.classes) {
2194 os << separator << type_index.index_;
2195 separator = ",";
2196 }
2197 }
2198 os << "}";
2199 }
2200 os << "], ";
2201 }
2202 bool startup = true;
2203 while (true) {
2204 os << "\n\t" << (startup ? "startup methods: " : "post startup methods: ");
2205 for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
2206 MethodHotness hotness_info(dex_data->GetHotnessInfo(method_idx));
2207 if (startup ? hotness_info.IsStartup() : hotness_info.IsPostStartup()) {
2208 if (dex_file != nullptr) {
2209 os << "\n\t\t" << dex_file->PrettyMethod(method_idx, true);
2210 } else {
2211 os << method_idx << ", ";
2212 }
2213 }
2214 }
2215 if (startup == false) {
2216 break;
2217 }
2218 startup = false;
2219 }
2220 os << "\n\tclasses: ";
2221 for (dex::TypeIndex type_index : dex_data->class_set) {
2222 if (dex_file != nullptr) {
2223 os << "\n\t\t" << PrettyDescriptor(GetTypeDescriptor(dex_file, type_index));
2224 } else {
2225 os << type_index.index_ << ",";
2226 }
2227 }
2228 }
2229 return os.str();
2230 }
2231
2232 bool ProfileCompilationInfo::GetClassesAndMethods(
2233 const DexFile& dex_file,
2234 /*out*/std::set<dex::TypeIndex>* class_set,
2235 /*out*/std::set<uint16_t>* hot_method_set,
2236 /*out*/std::set<uint16_t>* startup_method_set,
2237 /*out*/std::set<uint16_t>* post_startup_method_method_set,
2238 const ProfileSampleAnnotation& annotation) const {
2239 std::set<std::string> ret;
2240 const DexFileData* dex_data = FindDexDataUsingAnnotations(&dex_file, annotation);
2241 if (dex_data == nullptr) {
2242 return false;
2243 }
2244 for (const auto& it : dex_data->method_map) {
2245 hot_method_set->insert(it.first);
2246 }
2247 for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
2248 MethodHotness hotness = dex_data->GetHotnessInfo(method_idx);
2249 if (hotness.IsStartup()) {
2250 startup_method_set->insert(method_idx);
2251 }
2252 if (hotness.IsPostStartup()) {
2253 post_startup_method_method_set->insert(method_idx);
2254 }
2255 }
2256 for (const dex::TypeIndex& type_index : dex_data->class_set) {
2257 class_set->insert(type_index);
2258 }
2259 return true;
2260 }
2261
2262 const ArenaSet<dex::TypeIndex>* ProfileCompilationInfo::GetClasses(
2263 const DexFile& dex_file,
2264 const ProfileSampleAnnotation& annotation) const {
2265 const DexFileData* dex_data = FindDexDataUsingAnnotations(&dex_file, annotation);
2266 if (dex_data == nullptr) {
2267 return nullptr;
2268 }
2269 return &dex_data->class_set;
2270 }
2271
2272 bool ProfileCompilationInfo::SameVersion(const ProfileCompilationInfo& other) const {
2273 return memcmp(version_, other.version_, kProfileVersionSize) == 0;
2274 }
2275
2276 bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
2277 // No need to compare profile_key_map_. That's only a cache for fast search.
2278 // All the information is already in the info_ vector.
2279 if (!SameVersion(other)) {
2280 return false;
2281 }
2282 if (info_.size() != other.info_.size()) {
2283 return false;
2284 }
2285 for (size_t i = 0; i < info_.size(); i++) {
2286 const DexFileData& dex_data = *info_[i];
2287 const DexFileData& other_dex_data = *other.info_[i];
2288 if (!(dex_data == other_dex_data)) {
2289 return false;
2290 }
2291 }
2292
2293 return true;
2294 }
2295
2296 // Naive implementation to generate a random profile file suitable for testing.
2297 bool ProfileCompilationInfo::GenerateTestProfile(int fd,
2298 uint16_t number_of_dex_files,
2299 uint16_t method_percentage,
2300 uint16_t class_percentage,
2301 uint32_t random_seed) {
2302 const std::string base_dex_location = "base.apk";
2303 ProfileCompilationInfo info;
2304 // The limits are defined by the dex specification.
2305 const uint16_t max_methods = std::numeric_limits<uint16_t>::max();
2306 const uint16_t max_classes = std::numeric_limits<uint16_t>::max();
2307 uint16_t number_of_methods = max_methods * method_percentage / 100;
2308 uint16_t number_of_classes = max_classes * class_percentage / 100;
2309
2310 std::srand(random_seed);
2311
2312 // Make sure we generate more samples with a low index value.
2313 // This makes it more likely to hit valid method/class indices in small apps.
2314 const uint16_t kFavorFirstN = 10000;
2315 const uint16_t kFavorSplit = 2;
2316
2317 for (uint16_t i = 0; i < number_of_dex_files; i++) {
2318 std::string dex_location = DexFileLoader::GetMultiDexLocation(i, base_dex_location.c_str());
2319 std::string profile_key = info.GetProfileDexFileBaseKey(dex_location);
2320
2321 DexFileData* const data =
2322 info.GetOrAddDexFileData(profile_key, /*checksum=*/ 0, max_classes, max_methods);
2323 for (uint16_t m = 0; m < number_of_methods; m++) {
2324 uint16_t method_idx = rand() % max_methods;
2325 if (m < (number_of_methods / kFavorSplit)) {
2326 method_idx %= kFavorFirstN;
2327 }
2328 // Alternate between startup and post startup.
2329 uint32_t flags = MethodHotness::kFlagHot;
2330 flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
2331 data->AddMethod(static_cast<MethodHotness::Flag>(flags), method_idx);
2332 }
2333
2334 for (uint16_t c = 0; c < number_of_classes; c++) {
2335 uint16_t type_idx = rand() % max_classes;
2336 if (c < (number_of_classes / kFavorSplit)) {
2337 type_idx %= kFavorFirstN;
2338 }
2339 data->class_set.insert(dex::TypeIndex(type_idx));
2340 }
2341 }
2342 return info.Save(fd);
2343 }
2344
2345 // Naive implementation to generate a random profile file suitable for testing.
2346 // Description of random selection:
2347 // * Select a random starting point S.
2348 // * For every index i, add (S+i) % (N - total number of methods/classes) to the profile with
2349 //   a probability of 1/(N - i - number of methods/classes needed to add in profile).
2350 bool ProfileCompilationInfo::GenerateTestProfile(
2351 int fd,
2352 std::vector<std::unique_ptr<const DexFile>>& dex_files,
2353 uint16_t method_percentage,
2354 uint16_t class_percentage,
2355 uint32_t random_seed) {
2356 ProfileCompilationInfo info;
2357 std::default_random_engine rng(random_seed);
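  // Helper (added): picks `take` distinct indices uniformly at random from [0, out_of)
  // by shuffling the full index range, truncating to `take` elements, and sorting the result.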
2358 auto create_shuffled_range = [&rng](uint32_t take, uint32_t out_of) {
2359 CHECK_LE(take, out_of);
2360 std::vector<uint32_t> vec(out_of);
2361 std::iota(vec.begin(), vec.end(), 0u);
2362 std::shuffle(vec.begin(), vec.end(), rng);
2363 vec.erase(vec.begin() + take, vec.end());
2364 std::sort(vec.begin(), vec.end());
2365 return vec;
2366 };
2367 for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
2368 const std::string& dex_location = dex_file->GetLocation();
2369 std::string profile_key = info.GetProfileDexFileBaseKey(dex_location);
2370 uint32_t checksum = dex_file->GetLocationChecksum();
2371
2372 uint32_t number_of_classes = dex_file->NumClassDefs();
2373 uint32_t classes_required_in_profile = (number_of_classes * class_percentage) / 100;
2374
2375 DexFileData* const data = info.GetOrAddDexFileData(
2376 profile_key, checksum, dex_file->NumTypeIds(), dex_file->NumMethodIds());
2377 for (uint32_t class_index : create_shuffled_range(classes_required_in_profile,
2378 number_of_classes)) {
2379 data->class_set.insert(dex_file->GetClassDef(class_index).class_idx_);
2380 }
2381
2382 uint32_t number_of_methods = dex_file->NumMethodIds();
2383 uint32_t methods_required_in_profile = (number_of_methods * method_percentage) / 100;
2384 for (uint32_t method_index : create_shuffled_range(methods_required_in_profile,
2385 number_of_methods)) {
2386 // Alternate between startup and post startup.
2387 uint32_t flags = MethodHotness::kFlagHot;
2388 flags |= ((method_index & 1) != 0)
2389 ? MethodHotness::kFlagPostStartup
2390 : MethodHotness::kFlagStartup;
2391 data->AddMethod(static_cast<MethodHotness::Flag>(flags), method_index);
2392 }
2393 }
2394 return info.Save(fd);
2395 }
2396
2397 bool ProfileCompilationInfo::IsEmpty() const {
2398 DCHECK_EQ(info_.size(), profile_key_map_.size());
2399 // Note that this doesn't look at the bitmap region, so we will return true
2400 // when the profile contains only non-hot methods. This is generally ok, since for
2401 // a speed-profile to be useful we do need hot methods and resolved classes.
2402 return GetNumberOfMethods() == 0 && GetNumberOfResolvedClasses() == 0;
2403 }
2404
2405 ProfileCompilationInfo::InlineCacheMap*
2406 ProfileCompilationInfo::DexFileData::FindOrAddHotMethod(uint16_t method_index) {
2407 if (method_index >= num_method_ids) {
2408 LOG(ERROR) << "Invalid method index " << method_index << ". num_method_ids=" << num_method_ids;
2409 return nullptr;
2410 }
2411 return &(method_map.FindOrAdd(
2412 method_index,
2413 InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)))->second);
2414 }
2415
2416 // Mark a method as executed at least once.
2417 bool ProfileCompilationInfo::DexFileData::AddMethod(MethodHotness::Flag flags, size_t index) {
2418 if (index >= num_method_ids || index > kMaxSupportedMethodIndex) {
2419 LOG(ERROR) << "Invalid method index " << index << ". num_method_ids=" << num_method_ids
2420 << ", max: " << kMaxSupportedMethodIndex;
2421 return false;
2422 }
2423
2424 SetMethodHotness(index, flags);
2425
2426 if ((flags & MethodHotness::kFlagHot) != 0) {
2427 ProfileCompilationInfo::InlineCacheMap* result = FindOrAddHotMethod(index);
2428 DCHECK(result != nullptr);
2429 }
2430 return true;
2431 }
2432
2433 void ProfileCompilationInfo::DexFileData::SetMethodHotness(size_t index,
2434 MethodHotness::Flag flags) {
2435 DCHECK_LT(index, num_method_ids);
2436 ForMethodBitmapHotnessFlags([&](MethodHotness::Flag flag) {
2437 if ((flags & flag) != 0) {
2438 method_bitmap.StoreBit(MethodFlagBitmapIndex(
2439 static_cast<MethodHotness::Flag>(flag), index), /*value=*/ true);
2440 }
2441 return true;
2442 });
2443 }
2444
2445 ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHotnessInfo(
2446 uint32_t dex_method_index) const {
2447 MethodHotness ret;
2448 ForMethodBitmapHotnessFlags([&](MethodHotness::Flag flag) {
2449 if (method_bitmap.LoadBit(MethodFlagBitmapIndex(
2450 static_cast<MethodHotness::Flag>(flag), dex_method_index))) {
2451 ret.AddFlag(static_cast<MethodHotness::Flag>(flag));
2452 }
2453 return true;
2454 });
2455 auto it = method_map.find(dex_method_index);
2456 if (it != method_map.end()) {
2457 ret.SetInlineCacheMap(&it->second);
2458 ret.AddFlag(MethodHotness::kFlagHot);
2459 }
2460 return ret;
2461 }
2462
2463 // To simplify the implementation we use the MethodHotness flag values as indexes into the internal
2464 // bitmap representation. As such, they should never change unless the profile version is updated
2465 // and the implementation changed accordingly.
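// Judging from GetUsedBitmapFlags() below, the bitmap is laid out as one contiguous run of
// `num_method_ids` bits per flag, so the bit for (flag, method) effectively lives at
// FlagBitmapIndex(flag) * num_method_ids + method_index.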
2466 static_assert(ProfileCompilationInfo::MethodHotness::kFlagFirst == 1 << 0);
2467 static_assert(ProfileCompilationInfo::MethodHotness::kFlagHot == 1 << 0);
2468 static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartup == 1 << 1);
2469 static_assert(ProfileCompilationInfo::MethodHotness::kFlagPostStartup == 1 << 2);
2470 static_assert(ProfileCompilationInfo::MethodHotness::kFlagLastRegular == 1 << 2);
2471 static_assert(ProfileCompilationInfo::MethodHotness::kFlag32bit == 1 << 3);
2472 static_assert(ProfileCompilationInfo::MethodHotness::kFlag64bit == 1 << 4);
2473 static_assert(ProfileCompilationInfo::MethodHotness::kFlagSensitiveThread == 1 << 5);
2474 static_assert(ProfileCompilationInfo::MethodHotness::kFlagAmStartup == 1 << 6);
2475 static_assert(ProfileCompilationInfo::MethodHotness::kFlagAmPostStartup == 1 << 7);
2476 static_assert(ProfileCompilationInfo::MethodHotness::kFlagBoot == 1 << 8);
2477 static_assert(ProfileCompilationInfo::MethodHotness::kFlagPostBoot == 1 << 9);
2478 static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartupBin == 1 << 10);
2479 static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartupMaxBin == 1 << 15);
2480 static_assert(ProfileCompilationInfo::MethodHotness::kFlagLastBoot == 1 << 15);
2481
2482 uint16_t ProfileCompilationInfo::DexFileData::GetUsedBitmapFlags() const {
2483 uint32_t used_flags = 0u;
2484 ForMethodBitmapHotnessFlags([&](MethodHotness::Flag flag) {
2485 size_t index = FlagBitmapIndex(static_cast<MethodHotness::Flag>(flag));
2486 if (method_bitmap.HasSomeBitSet(index * num_method_ids, num_method_ids)) {
2487 used_flags |= flag;
2488 }
2489 return true;
2490 });
2491 return dchecked_integral_cast<uint16_t>(used_flags);
2492 }
2493
2494 ProfileCompilationInfo::DexPcData*
2495 ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
2496 return &(inline_cache->FindOrAdd(dex_pc, DexPcData(inline_cache->get_allocator()))->second);
2497 }
2498
2499 HashSet<std::string> ProfileCompilationInfo::GetClassDescriptors(
2500 const std::vector<const DexFile*>& dex_files,
2501 const ProfileSampleAnnotation& annotation) {
2502 HashSet<std::string> ret;
2503 for (const DexFile* dex_file : dex_files) {
2504 const DexFileData* data = FindDexDataUsingAnnotations(dex_file, annotation);
2505 if (data != nullptr) {
2506 for (dex::TypeIndex type_idx : data->class_set) {
2507 ret.insert(GetTypeDescriptor(dex_file, type_idx));
2508 }
2509 } else {
2510 VLOG(compiler) << "Failed to find profile data for " << dex_file->GetLocation();
2511 }
2512 }
2513 return ret;
2514 }
2515
2516 bool ProfileCompilationInfo::IsProfileFile(int fd) {
2517 // First check if it's an empty file as we allow empty profile files.
2518 // Profiles may be created by ActivityManager or installd before we manage to
2519 // process them in the runtime or profman.
2520 struct stat stat_buffer;
2521 if (fstat(fd, &stat_buffer) != 0) {
2522 return false;
2523 }
2524
2525 if (stat_buffer.st_size == 0) {
2526 return true;
2527 }
2528
2529   // The file is not empty. Check if it contains the profile magic.
2530 size_t byte_count = sizeof(kProfileMagic);
2531 uint8_t buffer[sizeof(kProfileMagic)];
2532 if (!android::base::ReadFullyAtOffset(fd, buffer, byte_count, /*offset=*/ 0)) {
2533 return false;
2534 }
2535
2536 // Reset the offset to prepare the file for reading.
2537 off_t rc = TEMP_FAILURE_RETRY(lseek(fd, 0, SEEK_SET));
2538 if (rc == static_cast<off_t>(-1)) {
2539 PLOG(ERROR) << "Failed to reset the offset";
2540 return false;
2541 }
2542
2543 return memcmp(buffer, kProfileMagic, byte_count) == 0;
2544 }
2545
2546 bool ProfileCompilationInfo::UpdateProfileKeys(
2547 const std::vector<std::unique_ptr<const DexFile>>& dex_files, /*out*/ bool* matched) {
2548 // This check aligns with when dex2oat falls back from "speed-profile" to "verify".
2549 //
2550 // ART Service relies on the exit code of profman, which is determined by the value of `matched`,
2551 // to judge whether it should re-dexopt for "speed-profile". Therefore, a misalignment will cause
2552 // repeated dexopt.
2553 if (IsEmpty()) {
2554 *matched = false;
2555 return true;
2556 }
2557 DCHECK(!info_.empty());
2558
2559 *matched = true;
2560
2561 // A map from the old base key to the new base key.
2562 std::unordered_map<std::string, std::string> old_key_to_new_key;
2563
2564 // A map from the new base key to all matching old base keys (an invert of the map above), for
2565 // detecting duplicate keys.
2566 std::unordered_map<std::string, std::unordered_set<std::string>> new_key_to_old_keys;
2567
2568 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
2569 std::string old_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
2570 bool found = false;
2571 for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
2572 if (dex_data->checksum == dex_file->GetLocationChecksum() &&
2573 dex_data->num_type_ids == dex_file->NumTypeIds() &&
2574 dex_data->num_method_ids == dex_file->NumMethodIds()) {
2575 std::string new_base_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
2576 old_key_to_new_key[old_base_key] = new_base_key;
2577 new_key_to_old_keys[new_base_key].insert(old_base_key);
2578 found = true;
2579 break;
2580 }
2581 }
2582 if (!found) {
2583 *matched = false;
2584 // Keep the old key.
2585 old_key_to_new_key[old_base_key] = old_base_key;
2586 new_key_to_old_keys[old_base_key].insert(old_base_key);
2587 }
2588 }
2589
2590 for (const auto& [new_key, old_keys] : new_key_to_old_keys) {
2591 if (old_keys.size() > 1) {
2592 LOG(ERROR) << "Cannot update multiple profile keys [" << android::base::Join(old_keys, ", ")
2593 << "] to the same new key '" << new_key << "'";
2594 return false;
2595 }
2596 }
2597
2598 // Check passed. Now perform the actual mutation.
2599 profile_key_map_.clear();
2600
2601 for (const std::unique_ptr<DexFileData>& dex_data : info_) {
2602 std::string old_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
2603 const std::string& new_base_key = old_key_to_new_key[old_base_key];
2604 DCHECK(!new_base_key.empty());
2605 // Retain the annotation (if any) during the renaming by re-attaching the info from the old key.
2606 dex_data->profile_key = MigrateAnnotationInfo(new_base_key, dex_data->profile_key);
2607 profile_key_map_.Put(dex_data->profile_key, dex_data->profile_index);
2608 }
2609
2610 return true;
2611 }
2612
2613 bool ProfileCompilationInfo::ProfileFilterFnAcceptAll(
2614 [[maybe_unused]] const std::string& dex_location, [[maybe_unused]] uint32_t checksum) {
2615 return true;
2616 }
2617
2618 void ProfileCompilationInfo::ClearData() {
2619 profile_key_map_.clear();
2620 info_.clear();
2621 extra_descriptors_indexes_.clear();
2622 extra_descriptors_.clear();
2623 }
2624
2625 void ProfileCompilationInfo::ClearDataAndAdjustVersion(bool for_boot_image) {
2626 ClearData();
2627 memcpy(version_,
2628 for_boot_image ? kProfileVersionForBootImage : kProfileVersion,
2629 kProfileVersionSize);
2630 }
2631
2632 bool ProfileCompilationInfo::IsForBootImage() const {
2633 return memcmp(version_, kProfileVersionForBootImage, sizeof(kProfileVersionForBootImage)) == 0;
2634 }
2635
2636 const uint8_t* ProfileCompilationInfo::GetVersion() const {
2637 return version_;
2638 }
2639
2640 bool ProfileCompilationInfo::DexFileData::ContainsClass(dex::TypeIndex type_index) const {
2641 return class_set.find(type_index) != class_set.end();
2642 }
2643
2644 uint32_t ProfileCompilationInfo::DexFileData::ClassesDataSize() const {
2645 return class_set.empty()
2646 ? 0u
2647 : sizeof(ProfileIndexType) + // Which dex file.
2648 sizeof(uint16_t) + // Number of classes.
2649 sizeof(uint16_t) * class_set.size(); // Type index diffs.
2650 }
2651
2652 void ProfileCompilationInfo::DexFileData::WriteClasses(SafeBuffer& buffer) const {
2653 if (class_set.empty()) {
2654 return;
2655 }
2656 buffer.WriteUintAndAdvance(profile_index);
2657 buffer.WriteUintAndAdvance(dchecked_integral_cast<uint16_t>(class_set.size()));
2658 WriteClassSet(buffer, class_set);
2659 }
2660
2661 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::DexFileData::ReadClasses(
2662 SafeBuffer& buffer,
2663 const dchecked_vector<ExtraDescriptorIndex>& extra_descriptors_remap,
2664 std::string* error) {
2665 uint16_t classes_size;
2666 if (!buffer.ReadUintAndAdvance(&classes_size)) {
2667 *error = "Error reading classes size.";
2668 return ProfileLoadStatus::kBadData;
2669 }
2670 uint16_t num_valid_type_indexes = dchecked_integral_cast<uint16_t>(
2671 std::min<size_t>(num_type_ids + extra_descriptors_remap.size(), DexFile::kDexNoIndex16));
2672 uint16_t type_index = 0u;
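  // The class set is stored as ascending type indices encoded as diffs from the previously
  // decoded index (the first diff is absolute); a zero diff after the first entry would mean
  // a duplicate, which is rejected below.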
2673 for (size_t i = 0; i != classes_size; ++i) {
2674 uint16_t type_index_diff;
2675 if (!buffer.ReadUintAndAdvance(&type_index_diff)) {
2676 *error = "Error reading class type index diff.";
2677 return ProfileLoadStatus::kBadData;
2678 }
2679 if (type_index_diff == 0u && i != 0u) {
2680 *error = "Duplicate type index.";
2681 return ProfileLoadStatus::kBadData;
2682 }
2683 if (type_index_diff >= num_valid_type_indexes - type_index) {
2684 *error = "Invalid type index.";
2685 return ProfileLoadStatus::kBadData;
2686 }
2687 type_index += type_index_diff;
2688 if (type_index >= num_type_ids) {
2689 uint32_t new_extra_descriptor_index = extra_descriptors_remap[type_index - num_type_ids];
2690 if (new_extra_descriptor_index >= DexFile::kDexNoIndex16 - num_type_ids) {
2691 *error = "Remapped type index out of range.";
2692 return ProfileLoadStatus::kMergeError;
2693 }
2694 class_set.insert(dex::TypeIndex(num_type_ids + new_extra_descriptor_index));
2695 } else {
2696 class_set.insert(dex::TypeIndex(type_index));
2697 }
2698 }
2699 return ProfileLoadStatus::kSuccess;
2700 }
2701
2702 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::DexFileData::SkipClasses(
2703 SafeBuffer& buffer,
2704 std::string* error) {
2705 uint16_t classes_size;
2706 if (!buffer.ReadUintAndAdvance(&classes_size)) {
2707 *error = "Error reading classes size to skip.";
2708 return ProfileLoadStatus::kBadData;
2709 }
2710 size_t following_data_size = static_cast<size_t>(classes_size) * sizeof(uint16_t);
2711 if (following_data_size > buffer.GetAvailableBytes()) {
2712 *error = "Classes data size to skip exceeds remaining data.";
2713 return ProfileLoadStatus::kBadData;
2714 }
2715 buffer.Advance(following_data_size);
2716 return ProfileLoadStatus::kSuccess;
2717 }
2718
2719 uint32_t ProfileCompilationInfo::DexFileData::MethodsDataSize(
2720 /*out*/ uint16_t* method_flags,
2721 /*out*/ size_t* saved_bitmap_bit_size) const {
2722 uint16_t local_method_flags = GetUsedBitmapFlags();
2723 size_t local_saved_bitmap_bit_size = POPCOUNT(local_method_flags) * num_method_ids;
2724 if (!method_map.empty()) {
2725 local_method_flags |= enum_cast<uint16_t>(MethodHotness::kFlagHot);
2726 }
2727 size_t size = 0u;
2728 if (local_method_flags != 0u) {
2729 size_t num_hot_methods = method_map.size();
2730 size_t num_dex_pc_entries = 0u;
2731 size_t num_class_entries = 0u;
2732 for (const auto& method_entry : method_map) {
2733 const InlineCacheMap& inline_cache_map = method_entry.second;
2734 num_dex_pc_entries += inline_cache_map.size();
2735 for (const auto& inline_cache_entry : inline_cache_map) {
2736 const DexPcData& dex_pc_data = inline_cache_entry.second;
2737 num_class_entries += dex_pc_data.classes.size();
2738 }
2739 }
2740
2741 constexpr size_t kPerHotMethodSize =
2742 sizeof(uint16_t) + // Method index diff.
2743 sizeof(uint16_t); // Inline cache size.
2744 constexpr size_t kPerDexPcEntrySize =
2745 sizeof(uint16_t) + // Dex PC.
2746 sizeof(uint8_t); // Number of inline cache classes.
2747 constexpr size_t kPerClassEntrySize =
2748 sizeof(uint16_t); // Type index diff.
2749
2750 size_t saved_bitmap_byte_size = BitsToBytesRoundUp(local_saved_bitmap_bit_size);
2751 size = sizeof(ProfileIndexType) + // Which dex file.
2752 sizeof(uint32_t) + // Total size of following data.
2753 sizeof(uint16_t) + // Method flags.
2754 saved_bitmap_byte_size + // Bitmap data.
2755 num_hot_methods * kPerHotMethodSize + // Data for hot methods.
2756 num_dex_pc_entries * kPerDexPcEntrySize + // Data for dex pc entries.
2757 num_class_entries * kPerClassEntrySize; // Data for inline cache class entries.
2758 }
2759 if (method_flags != nullptr) {
2760 *method_flags = local_method_flags;
2761 }
2762 if (saved_bitmap_bit_size != nullptr) {
2763 *saved_bitmap_bit_size = local_saved_bitmap_bit_size;
2764 }
2765 return size;
2766 }
2767
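// On-disk layout of a methods record, matching the sizes computed by MethodsDataSize():
//   ProfileIndexType profile_index;        // Which dex file.
//   uint32_t         following_data_size;  // Size of the data below, for easy skipping.
//   uint16_t         method_flags;         // Union of the hotness flags that are present.
//   uint8_t          bitmap[];             // One num_method_ids-wide bit region per set
//                                          // bitmap flag, in ForMethodBitmapHotnessFlags()
//                                          // order, padded up to a whole byte.
// Then, for each hot method in increasing method index order: a uint16_t method index
// diff and a uint16_t inline cache size, followed per dex pc by a uint16_t dex pc, a
// uint8_t class count (or a missing-types/megamorphic sentinel) and the delta-encoded
// uint16_t type indexes.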
void ProfileCompilationInfo::DexFileData::WriteMethods(SafeBuffer& buffer) const {
  uint16_t method_flags;
  size_t saved_bitmap_bit_size;
  uint32_t methods_data_size = MethodsDataSize(&method_flags, &saved_bitmap_bit_size);
  if (methods_data_size == 0u) {
    return;  // No data to write.
  }
  DCHECK_GE(buffer.GetAvailableBytes(), methods_data_size);
  uint32_t expected_available_bytes_at_end = buffer.GetAvailableBytes() - methods_data_size;

  // Write the profile index.
  buffer.WriteUintAndAdvance(profile_index);
  // Write the total size of the following methods data (without the profile index
  // and the total size itself) for easy skipping when the dex file is filtered out.
  uint32_t following_data_size = methods_data_size - sizeof(ProfileIndexType) - sizeof(uint32_t);
  buffer.WriteUintAndAdvance(following_data_size);
  // Write the used method flags.
  buffer.WriteUintAndAdvance(method_flags);

  // Write the bitmap data.
  size_t saved_bitmap_byte_size = BitsToBytesRoundUp(saved_bitmap_bit_size);
  DCHECK_LE(saved_bitmap_byte_size, buffer.GetAvailableBytes());
  BitMemoryRegion saved_bitmap(buffer.GetCurrentPtr(), /*bit_start=*/ 0, saved_bitmap_bit_size);
  size_t saved_bitmap_index = 0u;
  ForMethodBitmapHotnessFlags([&](MethodHotness::Flag flag) {
    if ((method_flags & flag) != 0u) {
      size_t index = FlagBitmapIndex(static_cast<MethodHotness::Flag>(flag));
      BitMemoryRegion src = method_bitmap.Subregion(index * num_method_ids, num_method_ids);
      saved_bitmap.Subregion(saved_bitmap_index * num_method_ids, num_method_ids).CopyBits(src);
      ++saved_bitmap_index;
    }
    return true;
  });
  DCHECK_EQ(saved_bitmap_index * num_method_ids, saved_bitmap_bit_size);
  // Clear the padding bits.
  size_t padding_bit_size = saved_bitmap_byte_size * kBitsPerByte - saved_bitmap_bit_size;
  BitMemoryRegion padding_region(buffer.GetCurrentPtr(), saved_bitmap_bit_size, padding_bit_size);
  padding_region.StoreBits(/*bit_offset=*/ 0u, /*value=*/ 0u, /*bit_length=*/ padding_bit_size);
  buffer.Advance(saved_bitmap_byte_size);

  uint16_t last_method_index = 0;
  for (const auto& method_entry : method_map) {
    uint16_t method_index = method_entry.first;
    const InlineCacheMap& inline_cache_map = method_entry.second;

    // Store the difference between the method indices for better compression.
    // The SafeMap is ordered by method_id, so the difference will always be non-negative.
    DCHECK_GE(method_index, last_method_index);
    uint16_t diff_with_last_method_index = method_index - last_method_index;
    last_method_index = method_index;
    buffer.WriteUintAndAdvance(diff_with_last_method_index);

    // Add inline cache map size.
    buffer.WriteUintAndAdvance(dchecked_integral_cast<uint16_t>(inline_cache_map.size()));

    // Add inline cache entries.
    for (const auto& inline_cache_entry : inline_cache_map) {
      uint16_t dex_pc = inline_cache_entry.first;
      const DexPcData& dex_pc_data = inline_cache_entry.second;
      const ArenaSet<dex::TypeIndex>& classes = dex_pc_data.classes;

      // Add the dex pc.
      buffer.WriteUintAndAdvance(dex_pc);

      // Add the megamorphic/missing_types encoding if needed and continue.
      // In either case we don't add any classes to the profile, so there's
      // no point in continuing.
      // TODO: in case we miss types there is still value in adding the rest of the
      // classes. (This requires changing the profile version or using a new section type.)
      if (dex_pc_data.is_missing_types) {
        // At this point the megamorphic flag should not be set.
        DCHECK(!dex_pc_data.is_megamorphic);
        DCHECK_EQ(classes.size(), 0u);
        buffer.WriteUintAndAdvance(kIsMissingTypesEncoding);
        continue;
      } else if (dex_pc_data.is_megamorphic) {
        DCHECK_EQ(classes.size(), 0u);
        buffer.WriteUintAndAdvance(kIsMegamorphicEncoding);
        continue;
      }

      DCHECK_LT(classes.size(), ProfileCompilationInfo::kIndividualInlineCacheSize);
      DCHECK_NE(classes.size(), 0u) << "InlineCache contains a dex_pc with 0 classes";

      // Add the number of classes for the dex PC.
      buffer.WriteUintAndAdvance(dchecked_integral_cast<uint8_t>(classes.size()));
      // Store the class set.
      WriteClassSet(buffer, classes);
    }
  }

  // Check if we've written the right number of bytes.
  DCHECK_EQ(buffer.GetAvailableBytes(), expected_available_bytes_at_end);
}

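// Reads a methods record written by WriteMethods() above. Bitmap flags are OR-ed into
// the existing method_bitmap, method and type index diffs are validated and accumulated
// back into absolute indexes, and the per-dex-pc class count byte doubles as a sentinel
// for the missing-types and megamorphic encodings.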
ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::DexFileData::ReadMethods(
    SafeBuffer& buffer,
    const dchecked_vector<ExtraDescriptorIndex>& extra_descriptors_remap,
    std::string* error) {
  uint32_t following_data_size;
  if (!buffer.ReadUintAndAdvance(&following_data_size)) {
    *error = "Error reading methods data size.";
    return ProfileLoadStatus::kBadData;
  }
  if (following_data_size > buffer.GetAvailableBytes()) {
    *error = "Methods data size exceeds available data size.";
    return ProfileLoadStatus::kBadData;
  }
  uint32_t expected_available_bytes_at_end = buffer.GetAvailableBytes() - following_data_size;

  // Read method flags.
  uint16_t method_flags;
  if (!buffer.ReadUintAndAdvance(&method_flags)) {
    *error = "Error reading method flags.";
    return ProfileLoadStatus::kBadData;
  }
  if (!is_for_boot_image && method_flags >= (MethodHotness::kFlagLastRegular << 1)) {
    // The profile we're loading contains data for boot image.
    *error = "Method flags contain boot image profile flags for non-boot image profile.";
    return ProfileLoadStatus::kBadData;
  }

  // Read method bitmap.
  size_t saved_bitmap_bit_size = POPCOUNT(method_flags & ~MethodHotness::kFlagHot) * num_method_ids;
  size_t saved_bitmap_byte_size = BitsToBytesRoundUp(saved_bitmap_bit_size);
  if (sizeof(uint16_t) + saved_bitmap_byte_size > following_data_size) {
    *error = "Insufficient available data for method bitmap.";
    return ProfileLoadStatus::kBadData;
  }
  BitMemoryRegion saved_bitmap(buffer.GetCurrentPtr(), /*bit_start=*/ 0, saved_bitmap_bit_size);
  size_t saved_bitmap_index = 0u;
  ForMethodBitmapHotnessFlags([&](MethodHotness::Flag flag) {
    if ((method_flags & flag) != 0u) {
      size_t index = FlagBitmapIndex(static_cast<MethodHotness::Flag>(flag));
      BitMemoryRegion src =
          saved_bitmap.Subregion(saved_bitmap_index * num_method_ids, num_method_ids);
      method_bitmap.Subregion(index * num_method_ids, num_method_ids).OrBits(src);
      ++saved_bitmap_index;
    }
    return true;
  });
  buffer.Advance(saved_bitmap_byte_size);

  // Load hot methods.
  if ((method_flags & MethodHotness::kFlagHot) != 0u) {
    uint32_t num_valid_method_indexes =
        std::min<uint32_t>(kMaxSupportedMethodIndex + 1u, num_method_ids);
    uint16_t num_valid_type_indexes = dchecked_integral_cast<uint16_t>(
        std::min<size_t>(num_type_ids + extra_descriptors_remap.size(), DexFile::kDexNoIndex16));
    uint16_t method_index = 0;
    bool first_diff = true;
    while (buffer.GetAvailableBytes() > expected_available_bytes_at_end) {
      uint16_t diff_with_last_method_index;
      if (!buffer.ReadUintAndAdvance(&diff_with_last_method_index)) {
        *error = "Error reading method index diff.";
        return ProfileLoadStatus::kBadData;
      }
      if (diff_with_last_method_index == 0u && !first_diff) {
        *error = "Duplicate method index.";
        return ProfileLoadStatus::kBadData;
      }
      first_diff = false;
      if (diff_with_last_method_index >= num_valid_method_indexes - method_index) {
        *error = "Invalid method index.";
        return ProfileLoadStatus::kBadData;
      }
      method_index += diff_with_last_method_index;
      InlineCacheMap* inline_cache = FindOrAddHotMethod(method_index);
      DCHECK(inline_cache != nullptr);

      // Load inline cache map size.
      uint16_t inline_cache_size;
      if (!buffer.ReadUintAndAdvance(&inline_cache_size)) {
        *error = "Error reading inline cache size.";
        return ProfileLoadStatus::kBadData;
      }
      for (uint16_t ic_index = 0; ic_index != inline_cache_size; ++ic_index) {
        // Load dex pc.
        uint16_t dex_pc;
        if (!buffer.ReadUintAndAdvance(&dex_pc)) {
          *error = "Error reading inline cache dex pc.";
          return ProfileLoadStatus::kBadData;
        }
        DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, dex_pc);
        DCHECK(dex_pc_data != nullptr);

        // Load inline cache classes.
        uint8_t inline_cache_classes_size;
        if (!buffer.ReadUintAndAdvance(&inline_cache_classes_size)) {
          *error = "Error reading inline cache classes size.";
          return ProfileLoadStatus::kBadData;
        }
        if (inline_cache_classes_size == kIsMissingTypesEncoding) {
          dex_pc_data->SetIsMissingTypes();
        } else if (inline_cache_classes_size == kIsMegamorphicEncoding) {
          dex_pc_data->SetIsMegamorphic();
        } else if (inline_cache_classes_size >= kIndividualInlineCacheSize) {
          *error = "Inline cache size too large.";
          return ProfileLoadStatus::kBadData;
        } else {
          uint16_t type_index = 0u;
          for (size_t i = 0; i != inline_cache_classes_size; ++i) {
            uint16_t type_index_diff;
            if (!buffer.ReadUintAndAdvance(&type_index_diff)) {
              *error = "Error reading inline cache type index diff.";
              return ProfileLoadStatus::kBadData;
            }
            if (type_index_diff == 0u && i != 0u) {
              *error = "Duplicate inline cache type index.";
              return ProfileLoadStatus::kBadData;
            }
            if (type_index_diff >= num_valid_type_indexes - type_index) {
              *error = "Invalid inline cache type index.";
              return ProfileLoadStatus::kBadData;
            }
            type_index += type_index_diff;
            if (type_index >= num_type_ids) {
              ExtraDescriptorIndex new_extra_descriptor_index =
                  extra_descriptors_remap[type_index - num_type_ids];
              if (new_extra_descriptor_index >= DexFile::kDexNoIndex16 - num_type_ids) {
                *error = "Remapped inline cache type index out of range.";
                return ProfileLoadStatus::kMergeError;
              }
              dex_pc_data->AddClass(dex::TypeIndex(num_type_ids + new_extra_descriptor_index));
            } else {
              dex_pc_data->AddClass(dex::TypeIndex(type_index));
            }
          }
        }
      }
    }
  }

  if (buffer.GetAvailableBytes() != expected_available_bytes_at_end) {
    *error = "Methods data did not end at expected position.";
    return ProfileLoadStatus::kBadData;
  }

  return ProfileLoadStatus::kSuccess;
}

ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::DexFileData::SkipMethods(
    SafeBuffer& buffer,
    std::string* error) {
  uint32_t following_data_size;
  if (!buffer.ReadUintAndAdvance(&following_data_size)) {
    *error = "Error reading methods data size to skip.";
    return ProfileLoadStatus::kBadData;
  }
  if (following_data_size > buffer.GetAvailableBytes()) {
    *error = "Methods data size to skip exceeds remaining data.";
    return ProfileLoadStatus::kBadData;
  }
  buffer.Advance(following_data_size);
  return ProfileLoadStatus::kSuccess;
}

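// Illustrative example (not from the original source): a class_set holding type
// indexes {3, 7, 8} is written as the diffs {3, 4, 1}; readers accumulate the diffs
// to recover the absolute indexes, which is why a zero diff is only valid for the
// first entry.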
void ProfileCompilationInfo::DexFileData::WriteClassSet(
    SafeBuffer& buffer,
    const ArenaSet<dex::TypeIndex>& class_set) {
  // Store the difference between the type indexes for better compression.
  uint16_t last_type_index = 0u;
  for (const dex::TypeIndex& type_index : class_set) {
    DCHECK_GE(type_index.index_, last_type_index);
    uint16_t diff_with_last_type_index = type_index.index_ - last_type_index;
    last_type_index = type_index.index_;
    buffer.WriteUintAndAdvance(diff_with_last_type_index);
  }
}

size_t ProfileCompilationInfo::GetSizeWarningThresholdBytes() const {
  return IsForBootImage() ? kSizeWarningThresholdBootBytes : kSizeWarningThresholdBytes;
}

size_t ProfileCompilationInfo::GetSizeErrorThresholdBytes() const {
  return IsForBootImage() ? kSizeErrorThresholdBootBytes : kSizeErrorThresholdBytes;
}

std::ostream& operator<<(std::ostream& stream,
                         ProfileCompilationInfo::DexReferenceDumper dumper) {
  stream << "[profile_key=" << dumper.GetProfileKey()
         << ",dex_checksum=" << std::hex << dumper.GetDexChecksum() << std::dec
         << ",num_type_ids=" << dumper.GetNumTypeIds()
         << ",num_method_ids=" << dumper.GetNumMethodIds()
         << "]";
  return stream;
}

FlattenProfileData::FlattenProfileData() :
    max_aggregation_for_methods_(0),
    max_aggregation_for_classes_(0) {
}

FlattenProfileData::ItemMetadata::ItemMetadata() :
    flags_(0) {
}

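// For a hot method, walks its recorded inline caches and, for each dex pc that still
// falls inside the method's code item, records the class of the method referenced by
// the instruction at that pc together with the observed receiver class descriptors
// (or the megamorphic/missing-types flags) into this item's inline_cache_ map.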
void FlattenProfileData::ItemMetadata::ExtractInlineCacheInfo(
    const ProfileCompilationInfo& profile_info,
    const DexFile* dex_file,
    uint16_t dex_method_idx) {
  ProfileCompilationInfo::MethodHotness hotness =
      profile_info.GetMethodHotness(MethodReference(dex_file, dex_method_idx));
  DCHECK(!hotness.IsHot() || hotness.GetInlineCacheMap() != nullptr);
  if (!hotness.IsHot() || hotness.GetInlineCacheMap()->empty()) {
    return;
  }
  const dex::MethodId& id = dex_file->GetMethodId(dex_method_idx);
  const ProfileCompilationInfo::InlineCacheMap* inline_caches = hotness.GetInlineCacheMap();
  const dex::ClassDef* class_def = dex_file->FindClassDef(id.class_idx_);
  if (class_def == nullptr) {
    // No class def found.
    return;
  }

  CodeItemInstructionAccessor accessor(
      *dex_file, dex_file->GetCodeItem(dex_file->FindCodeItemOffset(*class_def, dex_method_idx)));
  for (const auto& [pc, ic_data] : *inline_caches) {
    if (pc >= accessor.InsnsSizeInCodeUnits()) {
      // Inlined inline caches are not supported in AOT, so discard any pc beyond the
      // code item size. See also `HInliner::GetInlineCacheAOT`.
      continue;
    }
    const Instruction& inst = accessor.InstructionAt(pc);
    const dex::MethodId& target = dex_file->GetMethodId(inst.VRegB());
    if (ic_data.classes.empty() && !ic_data.is_megamorphic && !ic_data.is_missing_types) {
      continue;
    }
    InlineCacheInfo& val =
        inline_cache_.FindOrAdd(TypeReference(dex_file, target.class_idx_))->second;
    if (ic_data.is_megamorphic) {
      val.is_megamorphic_ = true;
    }
    if (ic_data.is_missing_types) {
      val.is_missing_types_ = true;
    }
    for (dex::TypeIndex type_index : ic_data.classes) {
      val.classes_.insert(profile_info.GetTypeDescriptor(dex_file, type_index));
    }
  }
}

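// Illustrative usage sketch (the GetMethodData() accessor name is assumed from the
// corresponding header and is not defined in this file; GetFlags(), GetAnnotations()
// and GetInlineCache() are used further below):
//   std::unique_ptr<FlattenProfileData> flat = profile_info.ExtractProfileData(dex_files);
//   for (const auto& [method_ref, metadata] : flat->GetMethodData()) {
//     // Inspect metadata.GetFlags(), metadata.GetAnnotations(), metadata.GetInlineCache().
//   }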
std::unique_ptr<FlattenProfileData> ProfileCompilationInfo::ExtractProfileData(
    const std::vector<std::unique_ptr<const DexFile>>& dex_files) const {

  std::unique_ptr<FlattenProfileData> result(new FlattenProfileData());

  auto create_metadata_fn = []() { return FlattenProfileData::ItemMetadata(); };

  // Iterate through all the dex files, find the methods/classes associated with each of them,
  // and add them to the flattened result.
  for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
    // Find all the dex data for the given dex file.
    // We may have multiple dex data if the methods or classes were added using
    // different annotations.
    std::vector<const DexFileData*> all_dex_data;
    FindAllDexData(dex_file.get(), &all_dex_data);
    for (const DexFileData* dex_data : all_dex_data) {
      // Extract the annotation from the key as we want to store it in the flattened result.
      ProfileSampleAnnotation annotation = GetAnnotationFromKey(dex_data->profile_key);

      // Check which methods from the current dex file are in the profile.
      for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
        MethodHotness hotness = dex_data->GetHotnessInfo(method_idx);
        if (!hotness.IsInProfile()) {
          // Not in the profile, continue.
          continue;
        }
        // The method is in the profile; create a metadata item for it and add it to the result.
        MethodReference ref(dex_file.get(), method_idx);
        FlattenProfileData::ItemMetadata& metadata =
            result->method_metadata_.GetOrCreate(ref, create_metadata_fn);
        metadata.flags_ |= hotness.flags_;
        metadata.annotations_.push_back(annotation);
        metadata.ExtractInlineCacheInfo(*this, dex_file.get(), method_idx);

        // Update the max aggregation counter for methods.
        // This is essentially a cache, to avoid traversing all the methods just to find out
        // this value.
        result->max_aggregation_for_methods_ = std::max(
            result->max_aggregation_for_methods_,
            static_cast<uint32_t>(metadata.annotations_.size()));
      }

      // Check which classes from the current dex file are in the profile.
      for (const dex::TypeIndex& type_index : dex_data->class_set) {
        if (type_index.index_ >= dex_file->NumTypeIds()) {
          // Not a valid `dex::TypeIndex` for `TypeReference`.
          // TODO: Rewrite the API to use descriptors or the `ProfileCompilationInfo` directly
          // instead of the `FlattenProfileData` helper class.
          continue;
        }
        TypeReference ref(dex_file.get(), type_index);
        FlattenProfileData::ItemMetadata& metadata =
            result->class_metadata_.GetOrCreate(ref, create_metadata_fn);
        metadata.annotations_.push_back(annotation);
        // Update the max aggregation counter for classes.
        result->max_aggregation_for_classes_ = std::max(
            result->max_aggregation_for_classes_,
            static_cast<uint32_t>(metadata.annotations_.size()));
      }
    }
  }

  return result;
}

void FlattenProfileData::ItemMetadata::MergeInlineCacheInfo(
    const SafeMap<TypeReference, InlineCacheInfo, TypeReferenceValueComparator>& other) {
  for (const auto& [target, inline_cache_data] : other) {
    if (!inline_cache_data.is_megamorphic_ && !inline_cache_data.is_missing_types_ &&
        inline_cache_data.classes_.empty()) {
      continue;
    }
    InlineCacheInfo& val = inline_cache_.FindOrAdd(target)->second;
    if (inline_cache_data.is_megamorphic_) {
      val.is_megamorphic_ = true;
    }
    if (inline_cache_data.is_missing_types_) {
      val.is_missing_types_ = true;
    }
    for (const std::string& cls : inline_cache_data.classes_) {
      val.classes_.insert(cls);
    }
  }
}

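// Merges another flattened profile into this one: hotness flags are OR-ed, annotation
// lists are concatenated, per-method inline cache info is unioned, and the cached
// max-aggregation counters are updated from the grown annotation lists.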
void FlattenProfileData::MergeData(const FlattenProfileData& other) {
  auto create_metadata_fn = []() { return FlattenProfileData::ItemMetadata(); };
  for (const auto& it : other.method_metadata_) {
    const MethodReference& otherRef = it.first;
    const FlattenProfileData::ItemMetadata otherData = it.second;
    const std::list<ProfileCompilationInfo::ProfileSampleAnnotation>& other_annotations =
        otherData.GetAnnotations();

    FlattenProfileData::ItemMetadata& metadata =
        method_metadata_.GetOrCreate(otherRef, create_metadata_fn);
    metadata.flags_ |= otherData.GetFlags();
    metadata.annotations_.insert(
        metadata.annotations_.end(), other_annotations.begin(), other_annotations.end());
    metadata.MergeInlineCacheInfo(otherData.GetInlineCache());

    max_aggregation_for_methods_ = std::max(
        max_aggregation_for_methods_,
        static_cast<uint32_t>(metadata.annotations_.size()));
  }
  for (const auto& it : other.class_metadata_) {
    const TypeReference& otherRef = it.first;
    const FlattenProfileData::ItemMetadata otherData = it.second;
    const std::list<ProfileCompilationInfo::ProfileSampleAnnotation>& other_annotations =
        otherData.GetAnnotations();

    FlattenProfileData::ItemMetadata& metadata =
        class_metadata_.GetOrCreate(otherRef, create_metadata_fn);
    metadata.flags_ |= otherData.GetFlags();
    metadata.annotations_.insert(
        metadata.annotations_.end(), other_annotations.begin(), other_annotations.end());

    max_aggregation_for_classes_ = std::max(
        max_aggregation_for_classes_,
        static_cast<uint32_t>(metadata.annotations_.size()));
  }
}

}  // namespace art