/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_
#define ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_

#include <optional>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/utils.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art HIDDEN {

class ArtMethod;

// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
 public:
  OatQuickMethodHeader(uint32_t code_info_offset = 0) {
    SetCodeInfoOffset(code_info_offset);
  }

  static OatQuickMethodHeader* NterpMethodHeader;
  EXPORT static ArrayRef<const uint8_t> NterpWithClinitImpl;
  EXPORT static ArrayRef<const uint8_t> NterpImpl;

  EXPORT bool IsNterpMethodHeader() const;

  static bool IsNterpPc(uintptr_t pc) {
    return OatQuickMethodHeader::NterpMethodHeader != nullptr &&
           OatQuickMethodHeader::NterpMethodHeader->Contains(pc);
  }

  static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
    uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
    uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
    DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA)) ||
           IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA)))
        << std::hex << code << " " << std::hex << header;
    return reinterpret_cast<OatQuickMethodHeader*>(header);
  }

  static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
    return FromCodePointer(EntryPointToCodePointer(entry_point));
  }

  static size_t InstructionAlignedSize() {
    return RoundUp(sizeof(OatQuickMethodHeader),
                   GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA));
  }

  OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
  OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;

  uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
    return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
  }
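  // A sketch of the memory layout implied by the accessors in this class
  // (illustrative only; the exact distance between the CodeInfo and the header
  // depends on what the compiler emitted):
  //
  //   [CodeInfo]               <- code_ - code_info_offset_ (GetOptimizedCodeInfoPtr)
  //   [OatQuickMethodHeader]   <- FromCodePointer(code_)
  //     uint32_t code_info_offset_;
  //   [compiled machine code]  <- code_ / GetEntryPoint() (low bit set on Thumb-2)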
  // Check if this is hand-written assembly (i.e. inside libart.so).
  // Returns std::nullopt on Mac.
  static std::optional<bool> IsStub(const uint8_t* pc);

  ALWAYS_INLINE bool IsOptimized() const {
    if (code_ == NterpWithClinitImpl.data() || code_ == NterpImpl.data()) {
      DCHECK(IsStub(code_).value_or(true));
      return false;
    }
    DCHECK(!IsStub(code_).value_or(false));
    return true;
  }

  ALWAYS_INLINE const uint8_t* GetOptimizedCodeInfoPtr() const {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE uint8_t* GetOptimizedCodeInfoPtr() {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE const uint8_t* GetCode() const {
    return code_;
  }

  ALWAYS_INLINE uint32_t GetCodeSize() const {
    if (code_ == NterpWithClinitImpl.data()) {
      return NterpWithClinitImpl.size();
    }
    if (code_ == NterpImpl.data()) {
      return NterpImpl.size();
    }
    return CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr());
  }

  ALWAYS_INLINE uint32_t GetCodeInfoOffset() const {
    DCHECK(IsOptimized());
    return code_info_offset_;
  }

  void SetCodeInfoOffset(uint32_t offset) { code_info_offset_ = offset; }

  bool Contains(uintptr_t pc) const {
    uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
    // Let's not make assumptions about other architectures.
#if defined(__aarch64__) || defined(__riscv__) || defined(__riscv)
    // Verify that the code pointer is not tagged. Memory for code gets allocated with
    // mspace_memalign or memory mapped from a file, neither of which is tagged by MTE/HWASan.
    DCHECK_EQ(code_start, code_start & ((UINT64_C(1) << 56) - 1));
#endif
    static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2,
                  "kThumb2 cannot be a runtime ISA");
    if (kRuntimeQuickCodeISA == InstructionSet::kArm) {
      // On Thumb-2, the pc is offset by one.
      code_start++;
    }
    return code_start <= pc && pc <= (code_start + GetCodeSize());
  }

  const uint8_t* GetEntryPoint() const {
    // When the runtime architecture is ARM, `kRuntimeQuickCodeISA` is set to `kArm`
    // (not `kThumb2`), *but* we always generate code for the Thumb-2
    // instruction set anyway. Thumb-2 requires the entrypoint to be offset by 1.
    static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2,
                  "kThumb2 cannot be a runtime ISA");
    return (kRuntimeQuickCodeISA == InstructionSet::kArm)
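               // Branching to an address with bit 0 set switches an ARM core to
               // the Thumb instruction state, so Thumb-2 entrypoints must carry
               // that bit; the header itself still points at the aligned code.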
               ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
               : code_;
  }

  template <bool kCheckFrameSize = true>
  uint32_t GetFrameSizeInBytes() const {
    uint32_t result = GetFrameInfo().FrameSizeInBytes();
    if (kCheckFrameSize) {
      DCHECK_ALIGNED(result, kStackAlignment);
    }
    return result;
  }

  QuickMethodFrameInfo GetFrameInfo() const {
    DCHECK(IsOptimized());
    return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
  }

  // SP-relative offset of the should_deoptimize flag: it sits directly below
  // the core and FP register spills at the top of the frame.
  size_t GetShouldDeoptimizeFlagOffset() const {
    DCHECK(IsOptimized());
    QuickMethodFrameInfo frame_info = GetFrameInfo();
    size_t frame_size = frame_info.FrameSizeInBytes();
    size_t core_spill_size =
        POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA);
    size_t fpu_spill_size =
        POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA);
    return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
  }

  // For non-catch handlers. Only used in test code.
  EXPORT uintptr_t ToNativeQuickPc(ArtMethod* method,
                                   const uint32_t dex_pc,
                                   bool abort_on_failure = true) const;

  // For catch handlers.
  uintptr_t ToNativeQuickPcForCatchHandlers(ArtMethod* method,
                                            ArrayRef<const uint32_t> dex_pc_list,
                                            /* out */ uint32_t* stack_map_row,
                                            bool abort_on_failure = true) const;

  uint32_t ToDexPc(ArtMethod** frame,
                   const uintptr_t pc,
                   bool abort_on_failure = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasShouldDeoptimizeFlag() const {
    return IsOptimized() && CodeInfo::HasShouldDeoptimizeFlag(GetOptimizedCodeInfoPtr());
  }

 private:
  uint32_t code_info_offset_ = 0u;
  uint8_t code_[0];  // The actual method code.
};

}  // namespace art

#endif  // ART_RUNTIME_OAT_OAT_QUICK_METHOD_HEADER_H_
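
// A minimal usage sketch (hypothetical caller; in the real runtime the
// entrypoint would come from an ArtMethod, the OatFile, or the JIT cache):
//
//   const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
//   OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(entry_point);
//   if (header->IsOptimized()) {
//     uint32_t code_size = header->GetCodeSize();
//     uint32_t frame_size = header->GetFrameSizeInBytes();
//   }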