/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"
#include <limits>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/hex_dump.h"
#include "base/indenter.h"
#include "base/pointer_size.h"
#include "base/utils.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "interpreter/mterp/nterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "managed_stack.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nterp_helpers.h"
#include "oat/oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"

namespace art HIDDEN {

using android::base::StringPrintf;

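// When true, logs verbose details about every frame encountered during a stack walk.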
static constexpr bool kDebugStackWalk = false;

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           bool check_suspended)
    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames,
                           bool check_suspended)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      cur_inline_info_(nullptr, CodeInfo()),
      cur_stack_map_(0, StackMap()),
      context_(context),
      check_suspended_(check_suspended) {
  if (check_suspended_) {
    DCHECK(thread == Thread::Current() || thread->GetState() != ThreadState::kRunnable) << *thread;
  }
}

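// Decoding the inline info table is relatively expensive, so the decoded CodeInfo is cached
// and only re-decoded when the walk reaches a frame with a different method header.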
CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_inline_info_.first != header) {
    cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
  }
  return &cur_inline_info_.second;
}

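// The stack map lookup is cached in the same way, keyed by the current quick frame PC.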
StackMap* StackVisitor::GetCurrentStackMap() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_stack_map_.first != cur_quick_frame_pc_) {
    uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
    cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
                                    GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
  }
  return &cur_stack_map_.second;
}

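// Returns the method of the frame the visitor currently stands on: the shadow frame's method
// for interpreted code, the resolved inlined callee when inside an inlined frame, or the
// ArtMethod* stored at the top of the quick frame otherwise.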
ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      CodeInfo* code_info = GetCurrentInlineInfo();
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

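// Translates the current frame's position into a dex pc. Shadow frames record it directly;
// quick frames recover it from the inline info, the method header, the stack map, or the
// nterp frame.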
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      return current_inline_frames_.back().GetDexPc();
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return dex::kDexNoIndex;
    } else if ((*GetCurrentQuickFrame())->IsNative()) {
      return cur_oat_quick_method_header_->ToDexPc(
          GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
    } else if (cur_oat_quick_method_header_->IsOptimized()) {
      StackMap* stack_map = GetCurrentStackMap();
      if (!stack_map->IsValid()) {
        // Debugging code for b/361916648.
        CodeInfo code_info(cur_oat_quick_method_header_);
        std::stringstream os;
        VariableIndentationOutputStream vios(&os);
        code_info.Dump(&vios, /* code_offset= */ 0u, /* verbose= */ true, kRuntimeQuickCodeISA);
        LOG(FATAL) << os.str() << '\n'
                   << "StackMap not found for "
                   << std::hex << cur_quick_frame_pc_ << " in "
                   << GetMethod()->PrettyMethod()
                   << " @" << std::hex
                   << reinterpret_cast<uintptr_t>(cur_oat_quick_method_header_->GetCode());
      }
      return stack_map->GetDexPc();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpGetDexPC(cur_quick_frame_);
    }
  } else {
    return 0;
  }
}

std::vector<uint32_t> StackVisitor::ComputeDexPcList(uint32_t handler_dex_pc) const {
  std::vector<uint32_t> result;
  if (cur_shadow_frame_ == nullptr && cur_quick_frame_ != nullptr && IsInInlinedFrame()) {
    const BitTableRange<InlineInfo>& infos = current_inline_frames_;
    DCHECK_NE(infos.size(), 0u);

    // Outermost dex_pc.
    result.push_back(GetCurrentStackMap()->GetDexPc());

    // The mid dex_pcs. Note that we skip the last one since we will replace it with
    // `handler_dex_pc`.
    for (size_t index = 0; index < infos.size() - 1; ++index) {
      result.push_back(infos[index].GetDexPc());
    }
  }

  // The innermost dex_pc has to be the handler dex_pc. In the case of no inline frames, it will
  // be just the one dex_pc. In the case of inlining we will be replacing the innermost
  // InlineInfo's dex_pc with this one.
  result.push_back(handler_dex_pc);
  return result;
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      // The `this` reference is stored in the first out vreg in the caller's frame.
      const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      auto* stack_ref = reinterpret_cast<StackReference<mirror::Object>*>(
          reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size + sizeof(ArtMethod*));
      return stack_ref->AsMirrorPtr();
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    CodeItemDataAccessor accessor(m->DexInstructionData());
    if (!accessor.HasCodeItem()) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
                           << ArtMethod::PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
      uint32_t value = 0;
      if (!GetVReg(m, reg, kReferenceVReg, &value)) {
        return nullptr;
      }
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

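// If the debugger attached a shadow frame to this frame and overrode the requested vreg,
// reads the overridden value; returns false when no debugger value is set.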
bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

bool StackVisitor::GetVReg(ArtMethod* m,
                           uint16_t vreg,
                           VRegKind kind,
                           uint32_t* val,
                           std::optional<DexRegisterLocation> location,
                           bool need_full_register_list) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    bool result = false;
    if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
      result = true;
      *val = (kind == kReferenceVReg)
          ? NterpGetVRegReference(cur_quick_frame_, vreg)
          : NterpGetVReg(cur_quick_frame_, vreg);
    } else {
      DCHECK(cur_oat_quick_method_header_->IsOptimized());
      if (location.has_value() && kind != kReferenceVReg) {
        uint32_t val2 = *val;
        // The caller already knows the register location, so we can use the faster overload
        // which does not decode the stack maps.
        result = GetVRegFromOptimizedCode(location.value(), val);
        // Compare to the slower overload.
        DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2, need_full_register_list));
        DCHECK_EQ(*val, val2);
      } else {
        result = GetVRegFromOptimizedCode(m, vreg, kind, val, need_full_register_list);
      }
    }
    if (kind == kReferenceVReg) {
      // Perform a read barrier in case we are in a different thread and GC is ongoing.
      mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
      uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
      DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
      *val = static_cast<uint32_t>(ptr_out);
    }
    return result;
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

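// Depth 0 denotes the outermost (not inlined) method; deeper levels index into the current
// chain of inline frames.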
size_t StackVisitor::GetNumberOfRegisters(CodeInfo* code_info, int depth) const {
  return depth == 0
      ? code_info->GetNumberOfDexRegisters()
      : current_inline_frames_[depth - 1].GetNumberOfDexRegisters();
}

bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m,
                                            uint16_t vreg,
                                            VRegKind kind,
                                            uint32_t* val,
                                            bool need_full_register_list) const {
  DCHECK_EQ(m, GetMethod());
  // Can't be null or how would we compile its instructions?
  DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info(method_header);

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  DCHECK(stack_map.IsValid());

  DexRegisterMap dex_register_map = (IsInInlinedFrame() && !need_full_register_list)
      ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
      : code_info.GetDexRegisterMapOf(stack_map,
                                      /* first= */ 0,
                                      GetNumberOfRegisters(&code_info, InlineDepth()));

  if (dex_register_map.empty()) {
    return false;
  }

  const size_t number_of_dex_registers = dex_register_map.size();
  DCHECK_LT(vreg, number_of_dex_registers);
  DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
      if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
        return false;
      }
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister: {
      uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
        return false;
      }
      return GetRegisterIfAccessible(reg, location_kind, val);
    }
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      if (kind == kReferenceVReg) {
        return false;
      }
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      return GetRegisterIfAccessible(reg, location_kind, val);
    }
    case DexRegisterLocation::Kind::kConstant: {
      uint32_t result = dex_register_map[vreg].GetConstant();
      if (kind == kReferenceVReg && result != 0) {
        return false;
      }
      *val = result;
      return true;
    }
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
      UNREACHABLE();
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const {
  switch (location.GetKind()) {
    case DexRegisterLocation::Kind::kInvalid:
      break;
    case DexRegisterLocation::Kind::kInStack: {
      const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
      *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh:
      return GetRegisterIfAccessible(location.GetMachineRegister(), location.GetKind(), val);
    case DexRegisterLocation::Kind::kConstant:
      *val = location.GetConstant();
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
  }
  LOG(FATAL) << "Unexpected location kind " << location.GetKind();
  UNREACHABLE();
}

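// Reads a machine register from the context, compensating for how 64-bit registers and x86
// XMM registers expose their 32-bit halves.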
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
                                           DexRegisterLocation::Kind location_kind,
                                           uint32_t* val) const {
  const bool is_float = (location_kind == DexRegisterLocation::Kind::kInFpuRegister) ||
                        (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);

  if (kRuntimeQuickCodeISA == InstructionSet::kX86 && is_float) {
    // X86 float registers are 64-bit and each XMM register is provided as two separate
    // 32-bit registers by the context.
    reg = (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh)
        ? (2 * reg + 1)
        : (2 * reg);
  }

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeQuickCodeISA);
  if (target64) {
    const bool is_high = (location_kind == DexRegisterLocation::Kind::kInRegisterHigh) ||
                         (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    ptr_val = static_cast<uintptr_t>(is_high ? High32Bits(value_long) : Low32Bits(value_long));
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

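// Wide (long/double) values occupy two consecutive vregs: read both halves and recombine.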
bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ == nullptr) {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
  if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
    uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
    uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
    *val = (val_hi << 32) + val_lo;
    return true;
  }

  DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
  DCHECK(m == GetMethod());
  DCHECK(cur_oat_quick_method_header_->IsOptimized());
  return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

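// Returns the shadow frame that a vreg write should target. For compiled frames this finds or
// creates a debugger shadow frame whose values take effect when the stack is deoptimized.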
ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    return nullptr;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = accessor.RegistersSize();
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember that the vreg(s) have been set for debugging and must not be overwritten by
    // the original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    if (wide) {
      thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
    }
  }
  return shadow_frame;
}

bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
  DCHECK(kind == kIntVReg || kind == kFloatVReg);
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVReg(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegReference(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

uintptr_t StackVisitor::GetReturnPcAddr() const {
  uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
  DCHECK_NE(sp, 0u);
  return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
}

uintptr_t StackVisitor::GetReturnPc() const {
  return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
}

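// Counts the frames on the given thread's stack by running a dedicated walk with a trivial
// visitor.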
size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() override {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += m->PrettyMethod();
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

void StackVisitor::SetMethod(ArtMethod* method) {
  DCHECK(GetMethod() != nullptr);
  if (cur_shadow_frame_ != nullptr) {
    cur_shadow_frame_->SetMethod(method);
  } else {
    DCHECK(cur_quick_frame_ != nullptr);
    CHECK(!IsInInlinedFrame()) << "We do not support setting an inlined method's ArtMethod: "
                               << GetMethod()->PrettyMethod() << " is inlined into "
                               << GetOuterMethod()->PrettyMethod();
    *cur_quick_frame_ = method;
  }
}

void StackVisitor::ValidateFrame() const {
  if (!kIsDebugBuild) {
    return;
  }
  ArtMethod* method = GetMethod();
  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
  // Runtime methods have null declaring class.
  if (!method->IsRuntimeMethod()) {
    CHECK(declaring_class != nullptr);
    CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
        << declaring_class;
  } else {
    CHECK(declaring_class == nullptr);
  }
  Runtime* const runtime = Runtime::Current();
  LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
  if (!linear_alloc->Contains(method)) {
    // Check class linker linear allocs.
    // We get the canonical method as copied methods may have been allocated
    // by a different class loader.
    const PointerSize ptr_size = runtime->GetClassLinker()->GetImagePointerSize();
    ArtMethod* canonical = method->GetCanonicalMethod(ptr_size);
    ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
    LinearAlloc* const class_linear_alloc = (klass != nullptr)
        ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
        : linear_alloc;
    if (!class_linear_alloc->Contains(canonical)) {
      // Check image space.
      bool in_image = false;
      for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
        if (space->IsImageSpace()) {
          auto* image_space = space->AsImageSpace();
          const auto& header = image_space->GetImageHeader();
          const ImageSection& methods = header.GetMethodsSection();
          const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
          const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
          if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
            in_image = true;
            break;
          }
        }
      }
      CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
    }
  }
  if (cur_quick_frame_ != nullptr) {
    // Frame consistency checks.
    size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
    CHECK_NE(frame_size, 0u);
    // For compiled code, we could try to have a rough guess at an upper size we expect
    // to see for a frame:
    //   256 registers
    //   2 words HandleScope overhead
    //   3+3 register spills
    // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
    const size_t kMaxExpectedFrameSize = interpreter::kNterpMaxFrame;
    CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
    size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
    CHECK_LT(return_pc_offset, frame_size);
  }
}

QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    if (cur_oat_quick_method_header_->IsOptimized()) {
      return cur_oat_quick_method_header_->GetFrameInfo();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpFrameInfo(cur_quick_frame_);
    }
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method of a proxy class: the constructor. A direct method is
    // cloned from the original java.lang.reflect.Proxy and is executed as a usual quick
    // compiled method without any stubs. Therefore the method must have an OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have an OatQuickMethodHeader";
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // The only remaining cases are for native methods that either
  //   - use the Generic JNI stub, called either directly or through some
  //     (resolution, instrumentation) trampoline; or
  //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
  DCHECK(method->IsNative());
  // The Generic JNI frame is just like the SaveRefsAndArgs frame.
  // Note that HandleScope, if any, is below the frame.
  return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}

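// The 'should deoptimize' flag lives just below the callee-save spills at the top of the
// frame, so its offset from SP is the frame size minus the spill area and the flag itself.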
uint8_t* StackVisitor::GetShouldDeoptimizeFlagAddr() const REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(GetCurrentOatQuickMethodHeader()->HasShouldDeoptimizeFlag());
  QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
  size_t frame_size = frame_info.FrameSizeInBytes();
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  size_t core_spill_size =
      POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA);
  size_t fpu_spill_size =
      POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA);
  size_t offset = frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
  uint8_t* should_deoptimize_addr = sp + offset;
  DCHECK_EQ(*should_deoptimize_addr & ~static_cast<uint8_t>(DeoptimizeFlagValue::kAll), 0);
  return should_deoptimize_addr;
}

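// Walks the stack from the most recent frame outwards. Each ManagedStack fragment holds either
// a chain of quick frames, traversed by following return PCs and frame sizes, or a linked list
// of shadow frames; inlined frames are synthesized from the stack maps when requested.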
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
  if (check_suspended_) {
    DCHECK(thread_ == Thread::Current() || thread_->GetState() != ThreadState::kRunnable);
  }
  CHECK_EQ(cur_depth_, 0U);

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    DCHECK(cur_oat_quick_method_header_ == nullptr);

    if (kDebugStackWalk) {
      LOG(INFO) << "Tid=" << thread_->GetThreadId()
                << ", ManagedStack fragment: " << current_fragment;
    }

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      DCHECK(method != nullptr);
      bool header_retrieved = false;
      if (method->IsNative()) {
        // We do not have a PC for the first frame, so we cannot simply use
        // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
        // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
        // changed since the frame was entered. The top quick frame tag indicates
        // GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
        if (UNLIKELY(current_fragment->GetTopQuickFrameGenericJniTag())) {
          // The generic JNI does not have any method header.
          cur_oat_quick_method_header_ = nullptr;
        } else if (UNLIKELY(current_fragment->GetTopQuickFrameJitJniTag())) {
          // Should be JITed code.
          Runtime* runtime = Runtime::Current();
          const void* code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
          CHECK(code != nullptr) << method->PrettyMethod();
          cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
        } else {
          // We are sure we are not running GenericJni here, though the entry point could
          // still be the GenericJNI stub. The entry point is usually JITed or AOT code.
          // It could also be a resolution stub if the class isn't visibly initialized yet.
          const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
          CHECK(existing_entry_point != nullptr);
          Runtime* runtime = Runtime::Current();
          ClassLinker* class_linker = runtime->GetClassLinker();
          // Check whether we can quickly get the header from the current entrypoint.
          if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
              !class_linker->IsQuickResolutionStub(existing_entry_point)) {
            cur_oat_quick_method_header_ =
                OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
          } else {
            const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
            if (code != nullptr) {
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
            } else {
              // For non-debuggable runtimes, the JNI stub can be JIT-compiled or AOT-compiled,
              // and can also reuse the stub in boot images. Since we checked for AOT code
              // earlier, we must be running JITed code or the boot JNI stub.
              // For debuggable runtimes, we won't be here, as we never use AOT code in
              // debuggable. And the JIT situation is handled earlier, as its SP will be
              // tagged. But there is a special case where we change the runtime state from
              // non-debuggable to debuggable in the JNI implementation and deoptimize inside,
              // which can be treated as the non-debuggable case as well.
              if (runtime->GetJit() != nullptr) {
                code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
              }
              if (code == nullptr) {
                // Check if the current method uses the boot JNI stub.
                const void* boot_jni_stub = class_linker->FindBootJniStub(method);
                if (boot_jni_stub != nullptr) {
                  code = EntryPointToCodePointer(boot_jni_stub);
                }
              }
              CHECK(code != nullptr) << method->PrettyMethod();
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
            }
          }
        }
        header_retrieved = true;
      }
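      // Walk the quick frames in this fragment by following return PCs; a null ArtMethod*
      // marks the transition frame that ends the fragment.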
      while (method != nullptr) {
        if (!header_retrieved) {
          cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        }
        header_retrieved = false;  // Force header retrieval in next iteration.

        if (kDebugStackWalk) {
          LOG(INFO) << "Early print: Tid=" << thread_->GetThreadId() << ", method: "
                    << ArtMethod::PrettyMethod(method) << "@" << method;
        }
        ValidateFrame();
        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()
            && !method->IsNative()  // JNI methods cannot have any inlined frames.
            && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
          DCHECK_NE(cur_quick_frame_pc_, 0u);
          CodeInfo* code_info = GetCurrentInlineInfo();
          StackMap* stack_map = GetCurrentStackMap();
          if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
            DCHECK_EQ(current_inline_frames_.size(), 0u);
            for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
                 !current_inline_frames_.empty();
                 current_inline_frames_.pop_back()) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        uintptr_t return_pc_addr = GetReturnPcAddr();

        cur_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
                    << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
                    << std::boolalpha
                    << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                         cur_oat_quick_method_header_->IsOptimized())
                    << " native=" << method->IsNative()
                    << std::noboolalpha
                    << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
                    << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
                    << " next=" << *cur_quick_frame_;
        }

        if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
          cur_depth_++;
        }
        method = *cur_quick_frame_;
      }
      // We reached a transition frame; it doesn't have a method header.
      cur_oat_quick_method_header_ = nullptr;
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        if (kDebugStackWalk) {
          ArtMethod* method = cur_shadow_frame_->GetMethod();
          LOG(INFO) << "Tid=" << thread_->GetThreadId() << ", method: "
                    << ArtMethod::PrettyMethod(method) << "@" << method
                    << ", ShadowFrame";
        }
        ValidateFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    if (kCount == CountTransitions::kYes) {
      cur_depth_++;
    }
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);

}  // namespace art