1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "android-base/logging.h"
18 #include "arch/context.h"
19 #include "arch/instruction_set.h"
20 #include "art_method-inl.h"
21 #include "art_method.h"
22 #include "base/callee_save_type.h"
23 #include "base/globals.h"
24 #include "base/pointer_size.h"
25 #include "callee_save_frame.h"
26 #include "common_throws.h"
27 #include "class_root-inl.h"
28 #include "debug_print.h"
29 #include "debugger.h"
30 #include "dex/dex_file-inl.h"
31 #include "dex/dex_file_types.h"
32 #include "dex/dex_instruction-inl.h"
33 #include "dex/method_reference.h"
34 #include "entrypoints/entrypoint_utils-inl.h"
35 #include "entrypoints/quick/callee_save_frame.h"
36 #include "entrypoints/runtime_asm_entrypoints.h"
37 #include "gc/accounting/card_table-inl.h"
38 #include "imt_conflict_table.h"
39 #include "imtable-inl.h"
40 #include "instrumentation.h"
41 #include "interpreter/interpreter.h"
42 #include "interpreter/interpreter_common.h"
43 #include "interpreter/shadow_frame-inl.h"
44 #include "jit/jit.h"
45 #include "jit/jit_code_cache.h"
46 #include "linear_alloc.h"
47 #include "method_handles.h"
48 #include "mirror/class-inl.h"
49 #include "mirror/dex_cache-inl.h"
50 #include "mirror/method.h"
51 #include "mirror/method_handle_impl.h"
52 #include "mirror/object-inl.h"
53 #include "mirror/object_array-inl.h"
54 #include "mirror/var_handle.h"
55 #include "oat/oat.h"
56 #include "oat/oat_file.h"
57 #include "oat/oat_quick_method_header.h"
58 #include "quick_exception_handler.h"
59 #include "runtime.h"
60 #include "scoped_thread_state_change-inl.h"
61 #include "stack.h"
62 #include "thread-inl.h"
63 #include "var_handles.h"
64 #include "well_known_classes.h"
65 #include "runtime_entrypoints_list.h"
66
67 namespace art HIDDEN {
68
69 // Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
70 template <typename FrameInfo>
71 class QuickArgumentVisitorImpl {
72 // Number of bytes for each out register in the caller method's frame.
73 static constexpr size_t kBytesStackArgLocation = 4;
74 // Frame size in bytes of a callee-save frame for RefsAndArgs.
75 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
76 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
77 // Offset of first GPR arg.
78 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
79 RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
80 // Offset of first FPR arg.
81 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
82 RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
83 // Offset of return address.
84 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
85 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
86
87 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
88 return FrameInfo::GprIndexToGprOffsetImpl(gpr_index);
89 }
90
91 static constexpr bool kSplitPairAcrossRegisterAndStack =
92 FrameInfo::kSplitPairAcrossRegisterAndStack;
93 static constexpr bool kAlignPairRegister = FrameInfo::kAlignPairRegister;
94 static constexpr bool kQuickSoftFloatAbi = FrameInfo::kQuickSoftFloatAbi;
95 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled =
96 FrameInfo::kQuickDoubleRegAlignedFloatBackFilled;
97 static constexpr bool kQuickSkipOddFpRegisters = FrameInfo::kQuickSkipOddFpRegisters;
98 static constexpr size_t kNumQuickGprArgs = FrameInfo::kNumQuickGprArgs;
99 static constexpr size_t kNumQuickFprArgs = FrameInfo::kNumQuickFprArgs;
100 static constexpr bool kGprFprLockstep = FrameInfo::kGprFprLockstep;
101 static constexpr bool kNaNBoxing = FrameInfo::kNaNBoxing;
102
103 public:
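// Whether narrow (32-bit) float values must be NaN-boxed when held in 64-bit FP
// registers; of the frame infos defined below only riscv64 sets kNaNBoxing
// (see NanBoxResultIfNeeded() further down).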
104 static constexpr bool NaNBoxing() { return FrameInfo::kNaNBoxing; }
105
106 static StackReference<mirror::Object>* GetThisObjectReference(ArtMethod** sp)
107 REQUIRES_SHARED(Locks::mutator_lock_) {
108 CHECK_GT(kNumQuickGprArgs, 0u);
109 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
110 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
111 GprIndexToGprOffset(kThisGprIndex);
112 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
113 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
114 }
115
116 static ArtMethod* GetCallingMethodAndDexPc(ArtMethod** sp, uint32_t* dex_pc)
117 REQUIRES_SHARED(Locks::mutator_lock_) {
118 DCHECK((*sp)->IsCalleeSaveMethod());
119 return GetCalleeSaveMethodCallerAndDexPc(sp, CalleeSaveType::kSaveRefsAndArgs, dex_pc);
120 }
121
122 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
123 uint32_t dex_pc;
124 return GetCallingMethodAndDexPc(sp, &dex_pc);
125 }
126
127 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
128 DCHECK((*sp)->IsCalleeSaveMethod());
129 uint8_t* previous_sp =
130 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
131 return *reinterpret_cast<ArtMethod**>(previous_sp);
132 }
133
134 static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
135 DCHECK((*sp)->IsCalleeSaveMethod());
136 uint8_t* return_address_spill =
137 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
138 return return_address_spill;
139 }
140
141 // For the given quick ref and args quick frame, return the caller's PC.
142 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
143 return *reinterpret_cast<uintptr_t*>(GetCallingPcAddr(sp));
144 }
145
146 QuickArgumentVisitorImpl(ArtMethod** sp, bool is_static, std::string_view shorty)
147 REQUIRES_SHARED(Locks::mutator_lock_)
148 : is_static_(is_static),
149 shorty_(shorty),
150 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
151 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
152 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize +
153 sizeof(ArtMethod*)), // Skip ArtMethod*.
154 gpr_index_(0),
155 fpr_index_(0),
156 fpr_double_index_(0),
157 stack_index_(0),
158 cur_type_(Primitive::kPrimVoid),
159 is_split_long_or_double_(false) {
160 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
161 "Number of Quick FPR arguments unexpected");
162 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
163 "Double alignment unexpected");
164 // For register alignment, we want to assume that counters(fpr_double_index_) are even if the
165 // next register is even.
166 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
167 "Number of Quick FPR arguments not even");
168 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
169 }
170
171 virtual ~QuickArgumentVisitorImpl() {}
172
173 virtual void Visit() = 0;
174
175 Primitive::Type GetParamPrimitiveType() const {
176 return cur_type_;
177 }
178
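// Returns the address of the current argument's spill slot: an FPR slot for
// float/double arguments on hard-float ABIs (honoring double back-filling),
// a GPR slot while argument GPRs remain, otherwise the corresponding slot in
// the caller's out-args area.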
179 uint8_t* GetParamAddress() const {
180 if (!kQuickSoftFloatAbi) {
181 Primitive::Type type = GetParamPrimitiveType();
182 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
183 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
184 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
185 return fpr_args_ +
186 (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
187 }
188 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
189 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
190 }
191 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
192 }
193 }
194 if (gpr_index_ < kNumQuickGprArgs) {
195 return gpr_args_ + GprIndexToGprOffset(gpr_index_);
196 }
197 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
198 }
199
200 bool IsSplitLongOrDouble() const {
201 if ((GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) ||
202 (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4)) {
203 return is_split_long_or_double_;
204 } else {
205 return false; // An optimization for when GPR and FPRs are 64bit.
206 }
207 }
208
209 bool IsParamAReference() const {
210 return GetParamPrimitiveType() == Primitive::kPrimNot;
211 }
212
213 bool IsParamALongOrDouble() const {
214 Primitive::Type type = GetParamPrimitiveType();
215 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
216 }
217
218 uint64_t ReadSplitLongParam() const {
219 // The split long is always available through the stack.
220 return *reinterpret_cast<uint64_t*>(stack_args_
221 + stack_index_ * kBytesStackArgLocation);
222 }
223
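// With kGprFprLockstep, GPR and FPR argument registers are consumed in
// lockstep, so advancing one index also advances the other. (None of the
// frame infos defined below enable it.)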
224 void IncGprIndex() {
225 gpr_index_++;
226 if (kGprFprLockstep) {
227 fpr_index_++;
228 }
229 }
230
231 void IncFprIndex() {
232 fpr_index_++;
233 if (kGprFprLockstep) {
234 gpr_index_++;
235 }
236 }
237
238 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
239 // (a) 'stack_args_' should point to the first method's argument
240 // (b) whatever the argument type, 'stack_index_' should be moved
241 // forward with each visit.
242 gpr_index_ = 0;
243 fpr_index_ = 0;
244 if (kQuickDoubleRegAlignedFloatBackFilled) {
245 fpr_double_index_ = 0;
246 }
247 stack_index_ = 0;
248 if (!is_static_) { // Handle this.
249 cur_type_ = Primitive::kPrimNot;
250 is_split_long_or_double_ = false;
251 Visit();
252 stack_index_++;
253 if (kNumQuickGprArgs > 0) {
254 IncGprIndex();
255 }
256 }
257 for (char c : shorty_.substr(1u)) {
258 cur_type_ = Primitive::GetType(c);
259 switch (cur_type_) {
260 case Primitive::kPrimNot:
261 case Primitive::kPrimBoolean:
262 case Primitive::kPrimByte:
263 case Primitive::kPrimChar:
264 case Primitive::kPrimShort:
265 case Primitive::kPrimInt:
266 is_split_long_or_double_ = false;
267 Visit();
268 stack_index_++;
269 if (gpr_index_ < kNumQuickGprArgs) {
270 IncGprIndex();
271 }
272 break;
273 case Primitive::kPrimFloat:
274 is_split_long_or_double_ = false;
275 Visit();
276 stack_index_++;
277 if (kQuickSoftFloatAbi) {
278 if (gpr_index_ < kNumQuickGprArgs) {
279 IncGprIndex();
280 }
281 } else {
282 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
283 IncFprIndex();
284 if (kQuickDoubleRegAlignedFloatBackFilled) {
285 // Double should not overlap with float.
286 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
287 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
288 // Float should not overlap with double.
289 if (fpr_index_ % 2 == 0) {
290 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
291 }
292 } else if (kQuickSkipOddFpRegisters) {
293 IncFprIndex();
294 }
295 }
296 }
297 break;
298 case Primitive::kPrimDouble:
299 case Primitive::kPrimLong:
300 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
301 if (cur_type_ == Primitive::kPrimLong &&
302 gpr_index_ == 0 &&
303 kAlignPairRegister) {
304 // Currently, this is only for ARM, where we align long parameters with
305 // even-numbered registers by skipping R1 and using R2 instead.
306 IncGprIndex();
307 }
308 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
309 ((gpr_index_ + 1) == kNumQuickGprArgs);
310 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
311 // We don't want to split this. Pass over this register.
312 gpr_index_++;
313 is_split_long_or_double_ = false;
314 }
315 Visit();
316 if (kBytesStackArgLocation == 4) {
317 stack_index_+= 2;
318 } else {
319 CHECK_EQ(kBytesStackArgLocation, 8U);
320 stack_index_++;
321 }
322 if (gpr_index_ < kNumQuickGprArgs) {
323 IncGprIndex();
324 if (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) {
325 if (gpr_index_ < kNumQuickGprArgs) {
326 IncGprIndex();
327 }
328 }
329 }
330 } else {
331 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
332 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
333 Visit();
334 if (kBytesStackArgLocation == 4) {
335 stack_index_+= 2;
336 } else {
337 CHECK_EQ(kBytesStackArgLocation, 8U);
338 stack_index_++;
339 }
340 if (kQuickDoubleRegAlignedFloatBackFilled) {
341 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
342 fpr_double_index_ += 2;
343 // Float should not overlap with double.
344 if (fpr_index_ % 2 == 0) {
345 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
346 }
347 }
348 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
349 IncFprIndex();
350 if (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) {
351 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
352 IncFprIndex();
353 }
354 }
355 }
356 }
357 break;
358 default:
359 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
360 }
361 }
362 }
363
364 protected:
365 const bool is_static_;
366 const std::string_view shorty_;
367
368 private:
369 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame.
370 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
371 uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
372 uint32_t gpr_index_; // Index into spilled GPRs.
373 // Index into spilled FPRs.
374 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
375 // holds a higher register number.
376 uint32_t fpr_index_;
377 // Index into spilled FPRs for aligned double.
378 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
379 // terms of singles, may be behind fpr_index.
380 uint32_t fpr_double_index_;
381 uint32_t stack_index_; // Index into arguments on the stack.
382 // The current type of argument during VisitArguments.
383 Primitive::Type cur_type_;
384 // Does a 64bit parameter straddle the register and stack arguments?
385 bool is_split_long_or_double_;
386 };
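// A rough worked example, assuming the arm64 frame info defined below: for a
// static method with shorty "VJIF", VisitArguments() locates the long in the
// first GPR spill slot (X1), the int in the second (X2), and the float in the
// first FPR spill slot (D0), while stack_index_ advances by 2, 1 and 1 so that
// arguments which overflow into the caller's out-args area stay in sync.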
387
388 class QuickArgumentFrameInfoARM {
389 public:
390 // The callee save frame is pointed to by SP.
391 // | argN | |
392 // | ... | |
393 // | arg4 | |
394 // | arg3 spill | | Caller's frame
395 // | arg2 spill | |
396 // | arg1 spill | |
397 // | Method* | ---
398 // | LR |
399 // | ... | 4x6 bytes callee saves
400 // | R3 |
401 // | R2 |
402 // | R1 |
403 // | S15 |
404 // | : |
405 // | S0 |
406 // | | 4x2 bytes padding
407 // | Method* | <- sp
408 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
409 static constexpr bool kAlignPairRegister = true;
410 static constexpr bool kQuickSoftFloatAbi = false;
411 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true;
412 static constexpr bool kQuickSkipOddFpRegisters = false;
413 static constexpr size_t kNumQuickGprArgs = 3;
414 static constexpr size_t kNumQuickFprArgs = 16;
415 static constexpr bool kGprFprLockstep = false;
416 static constexpr bool kNaNBoxing = false;
417 static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
418 return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm);
419 }
420 };
421
422 class QuickArgumentFrameInfoARM64 {
423 public:
424 // The callee save frame is pointed to by SP.
425 // | argN | |
426 // | ... | |
427 // | arg4 | |
428 // | arg3 spill | | Caller's frame
429 // | arg2 spill | |
430 // | arg1 spill | |
431 // | Method* | ---
432 // | LR |
433 // | X29 |
434 // | : |
435 // | X20 |
436 // | X7 |
437 // | : |
438 // | X1 |
439 // | D7 |
440 // | : |
441 // | D0 |
442 // | | padding
443 // | Method* | <- sp
444 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
445 static constexpr bool kAlignPairRegister = false;
446 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
447 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
448 static constexpr bool kQuickSkipOddFpRegisters = false;
449 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
450 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
451 static constexpr bool kGprFprLockstep = false;
452 static constexpr bool kNaNBoxing = false;
453 static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
454 return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm64);
455 }
456 };
457
458 class QuickArgumentFrameInfoRISCV64 {
459 public:
460 // The callee save frame is pointed to by SP.
461 // | argN | |
462 // | ... | |
463 // | reg. arg spills | | Caller's frame
464 // | Method* | ---
465 // | RA |
466 // | S11/X27 | callee-saved 11
467 // | S10/X26 | callee-saved 10
468 // | S9/X25 | callee-saved 9
469 // | S8/X24 | callee-saved 8
470 // | S7/X23 | callee-saved 7
471 // | S6/X22 | callee-saved 6
472 // | S5/X21 | callee-saved 5
473 // | S4/X20 | callee-saved 4
474 // | S3/X19 | callee-saved 3
475 // | S2/X18 | callee-saved 2
476 // | A7/X17 | arg 7
477 // | A6/X16 | arg 6
478 // | A5/X15 | arg 5
479 // | A4/X14 | arg 4
480 // | A3/X13 | arg 3
481 // | A2/X12 | arg 2
482 // | A1/X11 | arg 1 (A0 is the method => skipped)
483 // | S0/X8/FP | callee-saved 0 (S1 is TR => skipped)
484 // | FA7 | float arg 8
485 // | FA6 | float arg 7
486 // | FA5 | float arg 6
487 // | FA4 | float arg 5
488 // | FA3 | float arg 4
489 // | FA2 | float arg 3
490 // | FA1 | float arg 2
491 // | FA0 | float arg 1
492 // | A0/Method* | <- sp
493 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
494 static constexpr bool kAlignPairRegister = false;
495 static constexpr bool kQuickSoftFloatAbi = false;
496 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
497 static constexpr bool kQuickSkipOddFpRegisters = false;
498 static constexpr size_t kNumQuickGprArgs = 7;
499 static constexpr size_t kNumQuickFprArgs = 8;
500 static constexpr bool kGprFprLockstep = false;
501 static constexpr bool kNaNBoxing = true;
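// Per the layout above, the slot just above the FPR args holds S0/X8/FP (a
// callee save) and A1 is the first GPR argument slot, so the index is shifted
// by one.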
502 static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
503 // skip S0/X8/FP
504 return (gpr_index + 1) * GetBytesPerGprSpillLocation(InstructionSet::kRiscv64);
505 }
506 };
507
508 class QuickArgumentFrameInfoX86 {
509 public:
510 // The callee save frame is pointed to by SP.
511 // | argN | |
512 // | ... | |
513 // | arg4 | |
514 // | arg3 spill | | Caller's frame
515 // | arg2 spill | |
516 // | arg1 spill | |
517 // | Method* | ---
518 // | Return |
519 // | EBP,ESI,EDI | callee saves
520 // | EBX | arg3
521 // | EDX | arg2
522 // | ECX | arg1
523 // | XMM3 | float arg 4
524 // | XMM2 | float arg 3
525 // | XMM1 | float arg 2
526 // | XMM0 | float arg 1
527 // | EAX/Method* | <- sp
528 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
529 static constexpr bool kAlignPairRegister = false;
530 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
531 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
532 static constexpr bool kQuickSkipOddFpRegisters = false;
533 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
534 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs.
535 static constexpr bool kGprFprLockstep = false;
536 static constexpr bool kNaNBoxing = false;
537 static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
538 return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kX86);
539 }
540 };
541
542 class QuickArgumentFrameInfoX86_64 {
543 public:
544 // The callee save frame is pointed to by SP.
545 // | argN | |
546 // | ... | |
547 // | reg. arg spills | | Caller's frame
548 // | Method* | ---
549 // | Return |
550 // | R15 | callee save
551 // | R14 | callee save
552 // | R13 | callee save
553 // | R12 | callee save
554 // | R9 | arg5
555 // | R8 | arg4
556 // | RSI/R6 | arg1
557 // | RBP/R5 | callee save
558 // | RBX/R3 | callee save
559 // | RDX/R2 | arg2
560 // | RCX/R1 | arg3
561 // | XMM15 | callee save
562 // | XMM14 | callee save
563 // | XMM13 | callee save
564 // | XMM12 | callee save
565 // | XMM7 | float arg 8
566 // | XMM6 | float arg 7
567 // | XMM5 | float arg 6
568 // | XMM4 | float arg 5
569 // | XMM3 | float arg 4
570 // | XMM2 | float arg 3
571 // | XMM1 | float arg 2
572 // | XMM0 | float arg 1
573 // | Padding |
574 // | RDI/Method* | <- sp
575 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
576 static constexpr bool kAlignPairRegister = false;
577 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
578 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
579 static constexpr bool kQuickSkipOddFpRegisters = false;
580 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
581 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
582 static constexpr bool kGprFprLockstep = false;
583 static constexpr bool kNaNBoxing = false;
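// Note: the GPR spill order in the frame above (RCX, RDX, RBX, RBP, RSI, R8, R9
// from the bottom of the GPR area) differs from the managed argument order
// (RSI, RDX, RCX, R8, R9), hence the explicit index-to-slot mapping below.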
584 static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
585 static constexpr size_t kBytesPerSpill = GetBytesPerGprSpillLocation(InstructionSet::kX86_64);
586 switch (gpr_index) {
587 case 0: return (4 * kBytesPerSpill);
588 case 1: return (1 * kBytesPerSpill);
589 case 2: return (0 * kBytesPerSpill);
590 case 3: return (5 * kBytesPerSpill);
591 case 4: return (6 * kBytesPerSpill);
592 default:
593 LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
594 UNREACHABLE();
595 }
596 }
597 };
598
599 namespace detail {
600
601 template <InstructionSet>
602 struct QAFISelector;
603
604 template <>
605 struct QAFISelector<InstructionSet::kArm> { using type = QuickArgumentFrameInfoARM; };
606 template <>
607 struct QAFISelector<InstructionSet::kArm64> { using type = QuickArgumentFrameInfoARM64; };
608 template <>
609 struct QAFISelector<InstructionSet::kRiscv64> { using type = QuickArgumentFrameInfoRISCV64; };
610 template <>
611 struct QAFISelector<InstructionSet::kX86> { using type = QuickArgumentFrameInfoX86; };
612 template <>
613 struct QAFISelector<InstructionSet::kX86_64> { using type = QuickArgumentFrameInfoX86_64; };
614
615 } // namespace detail
616
617 using QuickArgumentVisitor =
618 QuickArgumentVisitorImpl<detail::QAFISelector<kRuntimeQuickCodeISA>::type>;
619
620 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
621 // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
622 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
623 REQUIRES_SHARED(Locks::mutator_lock_) {
624 DCHECK((*sp)->IsProxyMethod());
625 return QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr();
626 }
627
628 // Visits arguments on the stack placing them into the shadow frame.
629 class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
630 public:
631 BuildQuickShadowFrameVisitor(ArtMethod** sp,
632 bool is_static,
633 std::string_view shorty,
634 ShadowFrame* sf,
635 size_t first_arg_reg)
636 : QuickArgumentVisitor(sp, is_static, shorty), sf_(sf), cur_reg_(first_arg_reg) {}
637
638 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
639 void SetReceiver(ObjPtr<mirror::Object> receiver) REQUIRES_SHARED(Locks::mutator_lock_);
640
641 private:
642 ShadowFrame* const sf_;
643 uint32_t cur_reg_;
644
645 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
646 };
647
648 void BuildQuickShadowFrameVisitor::SetReceiver(ObjPtr<mirror::Object> receiver) {
649 DCHECK_EQ(cur_reg_, 0u);
650 sf_->SetVRegReference(cur_reg_, receiver);
651 ++cur_reg_;
652 }
653
654 void BuildQuickShadowFrameVisitor::Visit() {
655 Primitive::Type type = GetParamPrimitiveType();
656 switch (type) {
657 case Primitive::kPrimLong: // Fall-through.
658 case Primitive::kPrimDouble:
659 if (IsSplitLongOrDouble()) {
660 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
661 } else {
662 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
663 }
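// Wide values occupy two vregs; the extra increment below, together with the
// shared increment at the end of Visit(), advances cur_reg_ by two.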
664 ++cur_reg_;
665 break;
666 case Primitive::kPrimNot: {
667 StackReference<mirror::Object>* stack_ref =
668 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
669 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
670 }
671 break;
672 case Primitive::kPrimBoolean: // Fall-through.
673 case Primitive::kPrimByte: // Fall-through.
674 case Primitive::kPrimChar: // Fall-through.
675 case Primitive::kPrimShort: // Fall-through.
676 case Primitive::kPrimInt: // Fall-through.
677 case Primitive::kPrimFloat:
678 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
679 break;
680 case Primitive::kPrimVoid:
681 LOG(FATAL) << "UNREACHABLE";
682 UNREACHABLE();
683 }
684 ++cur_reg_;
685 }
686
687 // Don't inline. See b/65159206.
688 NO_INLINE
689 static void HandleDeoptimization(JValue* result,
690 ArtMethod* method,
691 ShadowFrame* deopt_frame,
692 ManagedStack* fragment)
693 REQUIRES_SHARED(Locks::mutator_lock_) {
694 // Coming from partial-fragment deopt.
695 Thread* self = Thread::Current();
696 if (kIsDebugBuild) {
697 // Consistency-check: are the methods as expected? We check that the last shadow frame
698 // (the bottom of the call-stack) corresponds to the called method.
699 ShadowFrame* linked = deopt_frame;
700 while (linked->GetLink() != nullptr) {
701 linked = linked->GetLink();
702 }
703 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
704 << ArtMethod::PrettyMethod(linked->GetMethod());
705 }
706
707 if (VLOG_IS_ON(deopt)) {
708 // Print out the stack to verify that it was a partial-fragment deopt.
709 LOG(INFO) << "Continue-ing from deopt. Stack is:";
710 QuickExceptionHandler::DumpFramesWithType(self, true);
711 }
712
713 ObjPtr<mirror::Throwable> pending_exception;
714 bool from_code = false;
715 DeoptimizationMethodType method_type;
716 self->PopDeoptimizationContext(/* out */ result,
717 /* out */ &pending_exception,
718 /* out */ &from_code,
719 /* out */ &method_type);
720
721 // Push a transition back into managed code onto the linked list in thread.
722 self->PushManagedStackFragment(fragment);
723
724 // Ensure that the stack is still in order.
725 if (kIsDebugBuild) {
726 class EntireStackVisitor : public StackVisitor {
727 public:
728 explicit EntireStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
729 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
730
731 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
732 // Nothing to do here. In a debug build, ValidateFrame will do the work in the walking
733 // logic. Just always say we want to continue.
734 return true;
735 }
736 };
737 EntireStackVisitor esv(self);
738 esv.WalkStack();
739 }
740
741 // Restore the exception that was pending before deoptimization then interpret the
742 // deoptimized frames.
743 if (pending_exception != nullptr) {
744 self->SetException(pending_exception);
745 }
746 interpreter::EnterInterpreterFromDeoptimize(self,
747 deopt_frame,
748 result,
749 from_code,
750 method_type);
751 }
752
753 static int64_t NanBoxResultIfNeeded(int64_t result, char result_shorty) {
754 return (QuickArgumentVisitor::NaNBoxing() && result_shorty == 'F')
755 ? result | UINT64_C(0xffffffff00000000)
756 : result;
757 }
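// For example, with NaN boxing enabled (riscv64) a float result of 1.0f
// (bits 0x3f800000) becomes 0x3f800000 OR'ed with 0xffffffff00000000, i.e.
// 0xffffffff3f800000, matching the hard-float convention for narrow values
// held in 64-bit FP registers.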
758
759 NO_STACK_PROTECTOR
760 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
761 REQUIRES_SHARED(Locks::mutator_lock_) {
762 // Ensure we don't get thread suspension until the object arguments are safely in the shadow
763 // frame.
764 ScopedQuickEntrypointChecks sqec(self);
765
766 if (UNLIKELY(!method->IsInvokable())) {
767 method->ThrowInvocationTimeError(
768 method->IsStatic()
769 ? nullptr
770 : QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr());
771 return 0;
772 }
773
774 DCHECK(!method->IsNative()) << method->PrettyMethod();
775
776 JValue result;
777
778 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
779 DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
780 std::string_view shorty = non_proxy_method->GetShortyView();
781
782 ManagedStack fragment;
783 ShadowFrame* deopt_frame = self->MaybePopDeoptimizedStackedShadowFrame();
784 if (UNLIKELY(deopt_frame != nullptr)) {
785 HandleDeoptimization(&result, method, deopt_frame, &fragment);
786 } else {
787 CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
788 const char* old_cause = self->StartAssertNoThreadSuspension(
789 "Building interpreter shadow frame");
790 uint16_t num_regs = accessor.RegistersSize();
791 // No last shadow coming from quick.
792 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
793 CREATE_SHADOW_FRAME(num_regs, method, /* dex_pc= */ 0);
794 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
795 size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
796 BuildQuickShadowFrameVisitor shadow_frame_builder(
797 sp, method->IsStatic(), shorty, shadow_frame, first_arg_reg);
798 shadow_frame_builder.VisitArguments();
799 self->EndAssertNoThreadSuspension(old_cause);
800
801 // Potentially run <clinit> before pushing the shadow frame. We do not want
802 // to have the called method on the stack if there is an exception.
803 if (!EnsureInitialized(self, shadow_frame)) {
804 DCHECK(self->IsExceptionPending());
805 return 0;
806 }
807
808 // Push a transition back into managed code onto the linked list in thread.
809 self->PushManagedStackFragment(&fragment);
810 self->PushShadowFrame(shadow_frame);
811 result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
812 }
813
814 // Pop transition.
815 self->PopManagedStackFragment(fragment);
816
817 // Check if caller needs to be deoptimized for instrumentation reasons.
818 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
819 if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) {
820 ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp);
821 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
822 DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc));
823 DCHECK(caller != nullptr);
824 DCHECK(self->GetException() != Thread::GetDeoptimizationException());
825 // Push the context of the deoptimization stack so we can restore the return value and the
826 // exception before executing the deoptimized frames.
827 self->PushDeoptimizationContext(result,
828 shorty[0] == 'L' || shorty[0] == '[', // class or array
829 self->GetException(),
830 /* from_code= */ false,
831 DeoptimizationMethodType::kDefault);
832
833 // Set special exception to cause deoptimization.
834 self->SetException(Thread::GetDeoptimizationException());
835 }
836
837 // No need to restore the args since the method has already been run by the interpreter.
838 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
839 }
840
841 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
842 // converted to jobjects.
843 class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
844 public:
845 BuildQuickArgumentVisitor(ArtMethod** sp,
846 bool is_static,
847 std::string_view shorty,
848 ScopedObjectAccessUnchecked* soa,
849 std::vector<jvalue>* args)
850 : QuickArgumentVisitor(sp, is_static, shorty), soa_(soa), args_(args) {}
851
852 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
853
854 private:
855 ScopedObjectAccessUnchecked* const soa_;
856 std::vector<jvalue>* const args_;
857
858 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
859 };
860
861 void BuildQuickArgumentVisitor::Visit() {
862 jvalue val;
863 Primitive::Type type = GetParamPrimitiveType();
864 switch (type) {
865 case Primitive::kPrimNot: {
866 StackReference<mirror::Object>* stack_ref =
867 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
868 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
869 break;
870 }
871 case Primitive::kPrimLong: // Fall-through.
872 case Primitive::kPrimDouble:
873 if (IsSplitLongOrDouble()) {
874 val.j = ReadSplitLongParam();
875 } else {
876 val.j = *reinterpret_cast<jlong*>(GetParamAddress());
877 }
878 break;
879 case Primitive::kPrimBoolean: // Fall-through.
880 case Primitive::kPrimByte: // Fall-through.
881 case Primitive::kPrimChar: // Fall-through.
882 case Primitive::kPrimShort: // Fall-through.
883 case Primitive::kPrimInt: // Fall-through.
884 case Primitive::kPrimFloat:
885 val.i = *reinterpret_cast<jint*>(GetParamAddress());
886 break;
887 case Primitive::kPrimVoid:
888 LOG(FATAL) << "UNREACHABLE";
889 UNREACHABLE();
890 }
891 args_->push_back(val);
892 }
893
894 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
895 // which is responsible for recording callee save registers. We explicitly place into jobjects the
896 // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
897 // field within the proxy object, which will box the primitive arguments and deal with error cases.
898 extern "C" uint64_t artQuickProxyInvokeHandler(
899 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
900 REQUIRES_SHARED(Locks::mutator_lock_) {
901 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
902 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
903 // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
904 const char* old_cause =
905 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
906 // Register the top of the managed stack, making stack crawlable.
907 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
908 self->VerifyStack();
909 // Start new JNI local reference state.
910 JNIEnvExt* env = self->GetJniEnv();
911 ScopedObjectAccessUnchecked soa(env);
912 ScopedJniEnvLocalRefState env_state(env);
913 // Create local ref. copies of proxy method and the receiver.
914 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
915
916 // Place the arguments into the args vector and remove the receiver.
917 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
918 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
919 << non_proxy_method->PrettyMethod();
920 std::vector<jvalue> args;
921 uint32_t shorty_len = 0;
922 const char* raw_shorty = non_proxy_method->GetShorty(&shorty_len);
923 std::string_view shorty(raw_shorty, shorty_len);
924 BuildQuickArgumentVisitor local_ref_visitor(sp, /* is_static= */ false, shorty, &soa, &args);
925
926 local_ref_visitor.VisitArguments();
927 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
928 args.erase(args.begin());
929
930 // Convert proxy method into expected interface method.
931 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
932 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
933 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
934 self->EndAssertNoThreadSuspension(old_cause);
935 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
936 DCHECK(!Runtime::Current()->IsActiveTransaction());
937 ObjPtr<mirror::Method> interface_reflect_method =
938 mirror::Method::CreateFromArtMethod<kRuntimePointerSize>(soa.Self(), interface_method);
939 if (interface_reflect_method == nullptr) {
940 soa.Self()->AssertPendingOOMException();
941 return 0;
942 }
943 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);
944
945 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
946 // that performs allocations or instrumentation events.
947 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
948 if (instr->HasMethodEntryListeners()) {
949 instr->MethodEnterEvent(soa.Self(), proxy_method);
950 if (soa.Self()->IsExceptionPending()) {
951 instr->MethodUnwindEvent(self,
952 proxy_method,
953 0);
954 return 0;
955 }
956 }
957 JValue result =
958 InvokeProxyInvocationHandler(soa, raw_shorty, rcvr_jobj, interface_method_jobj, args);
959 if (soa.Self()->IsExceptionPending()) {
960 if (instr->HasMethodUnwindListeners()) {
961 instr->MethodUnwindEvent(self,
962 proxy_method,
963 0);
964 }
965 } else if (instr->HasMethodExitListeners()) {
966 instr->MethodExitEvent(self,
967 proxy_method,
968 {},
969 result);
970 }
971
972 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
973 }
974
975 // Visitor returning a reference argument at a given position in a Quick stack frame.
976 // NOTE: Only used for testing purposes.
977 class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
978 public:
979 GetQuickReferenceArgumentAtVisitor(ArtMethod** sp, std::string_view shorty, size_t arg_pos)
980 : QuickArgumentVisitor(sp, /* is_static= */ false, shorty),
981 cur_pos_(0u),
982 arg_pos_(arg_pos),
983 ref_arg_(nullptr) {
984 CHECK_LT(arg_pos, shorty.length()) << "Argument position greater than the number of arguments";
985 }
986
987 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
988 if (cur_pos_ == arg_pos_) {
989 Primitive::Type type = GetParamPrimitiveType();
990 CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
991 ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
992 }
993 ++cur_pos_;
994 }
995
996 StackReference<mirror::Object>* GetReferenceArgument() {
997 return ref_arg_;
998 }
999
1000 private:
1001 // The position of the currently visited argument.
1002 size_t cur_pos_;
1003 // The position of the searched argument.
1004 const size_t arg_pos_;
1005 // The reference argument, if found.
1006 StackReference<mirror::Object>* ref_arg_;
1007
1008 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor);
1009 };
1010
1011 // Returning reference argument at position `arg_pos` in Quick stack frame at address `sp`.
1012 // NOTE: Only used for testing purposes.
1013 EXPORT extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(
1014 size_t arg_pos, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
1015 ArtMethod* proxy_method = *sp;
1016 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
1017 CHECK(!non_proxy_method->IsStatic())
1018 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
1019 std::string_view shorty = non_proxy_method->GetShortyView();
1020 GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, arg_pos);
1021 ref_arg_visitor.VisitArguments();
1022 StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument();
1023 return ref_arg;
1024 }
1025
1026 // Visitor returning all the reference arguments in a Quick stack frame.
1027 class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
1028 public:
1029 GetQuickReferenceArgumentsVisitor(ArtMethod** sp, bool is_static, std::string_view shorty)
1030 : QuickArgumentVisitor(sp, is_static, shorty) {}
1031
1032 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
1033 Primitive::Type type = GetParamPrimitiveType();
1034 if (type == Primitive::kPrimNot) {
1035 StackReference<mirror::Object>* ref_arg =
1036 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1037 ref_args_.push_back(ref_arg);
1038 }
1039 }
1040
1041 std::vector<StackReference<mirror::Object>*> GetReferenceArguments() {
1042 return ref_args_;
1043 }
1044
1045 private:
1046 // The reference arguments.
1047 std::vector<StackReference<mirror::Object>*> ref_args_;
1048
1049 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor);
1050 };
1051
1052 // Returning all reference arguments in Quick stack frame at address `sp`.
1053 std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
1054 REQUIRES_SHARED(Locks::mutator_lock_) {
1055 ArtMethod* proxy_method = *sp;
1056 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
1057 CHECK(!non_proxy_method->IsStatic())
1058 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
1059 std::string_view shorty = non_proxy_method->GetShortyView();
1060 GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty);
1061 ref_args_visitor.VisitArguments();
1062 std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
1063 return ref_args;
1064 }
1065
1066 // Read object references held in arguments from quick frames and place them in JNI local
1067 // references, so they don't get garbage collected.
1068 class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
1069 public:
1070 RememberForGcArgumentVisitor(ArtMethod** sp,
1071 bool is_static,
1072 std::string_view shorty,
1073 ScopedObjectAccessUnchecked* soa)
1074 : QuickArgumentVisitor(sp, is_static, shorty), soa_(soa) {}
1075
1076 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
1077
1078 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
1079
1080 private:
1081 ScopedObjectAccessUnchecked* const soa_;
1082 // References which we must update when exiting in case the GC moved the objects.
1083 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
1084
1085 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
1086 };
1087
1088 void RememberForGcArgumentVisitor::Visit() {
1089 if (IsParamAReference()) {
1090 StackReference<mirror::Object>* stack_ref =
1091 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1092 jobject reference =
1093 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
1094 references_.push_back(std::make_pair(reference, stack_ref));
1095 }
1096 }
1097
1098 void RememberForGcArgumentVisitor::FixupReferences() {
1099 // Fixup any references which may have changed.
1100 for (const auto& pair : references_) {
1101 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
1102 soa_->Env()->DeleteLocalRef(pair.first);
1103 }
1104 }
1105
1106 static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
1107 REQUIRES_SHARED(Locks::mutator_lock_) {
1108 if (dex_pc == static_cast<uint32_t>(-1)) {
1109 CHECK(method == WellKnownClasses::java_lang_String_charAt);
1110 return "<native>";
1111 } else {
1112 CodeItemInstructionAccessor accessor = method->DexInstructions();
1113 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1114 return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile());
1115 }
1116 }
1117
1118 static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass)
1119 REQUIRES_SHARED(Locks::mutator_lock_) {
1120 std::string storage;
1121 const char* descriptor = klass->GetDescriptor(&storage);
1122 LOG(FATAL_WITHOUT_ABORT) << " " << DescribeLoaders(klass->GetClassLoader(), descriptor);
1123 const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile();
1124 if (oat_dex_file != nullptr) {
1125 const OatFile* oat_file = oat_dex_file->GetOatFile();
1126 const char* dex2oat_cmdline =
1127 oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
1128 LOG(FATAL_WITHOUT_ABORT) << " OatFile: " << oat_file->GetLocation()
1129 << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>");
1130 }
1131 }
1132
1133 static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
1134 // Mimic the search for the caller and dump some data while doing so.
1135 LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240.";
1136
1137 constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
1138 CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
1139
1140 constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
1141 auto** caller_sp = reinterpret_cast<ArtMethod**>(
1142 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
1143 constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
1144 uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
1145 (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
1146 ArtMethod* outer_method = *caller_sp;
1147
1148 const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
1149 CHECK(current_code != nullptr);
1150 CHECK(current_code->IsOptimized());
1151 uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
1152 CodeInfo code_info(current_code);
1153 StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
1154 CHECK(stack_map.IsValid());
1155 uint32_t dex_pc = stack_map.GetDexPc();
1156
1157 // Log the outer method and its associated dex file and class table pointer which can be used
1158 // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
1159 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1160 LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod()
1161 << " native pc: " << caller_pc
1162 << " dex pc: " << dex_pc
1163 << " dex file: " << outer_method->GetDexFile()->GetLocation()
1164 << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader());
1165 DumpB74410240ClassData(outer_method->GetDeclaringClass());
1166 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
1167
1168 ArtMethod* caller = outer_method;
1169 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
1170 for (InlineInfo inline_info : inline_infos) {
1171 const char* tag = "";
1172 dex_pc = inline_info.GetDexPc();
1173 if (inline_info.EncodesArtMethod()) {
1174 tag = "encoded ";
1175 caller = inline_info.GetArtMethod();
1176 } else {
1177 uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
1178 if (dex_pc == static_cast<uint32_t>(-1)) {
1179 tag = "special ";
1180 CHECK(inline_info.Equals(inline_infos.back()));
1181 caller = WellKnownClasses::java_lang_String_charAt;
1182 CHECK_EQ(caller->GetDexMethodIndex(), method_index);
1183 } else {
1184 ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
1185 ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
1186 caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
1187 CHECK(caller != nullptr);
1188 }
1189 }
1190 LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
1191 << ": " << tag << caller->PrettyMethod()
1192 << " dex pc: " << dex_pc
1193 << " dex file: " << caller->GetDexFile()->GetLocation()
1194 << " class table: "
1195 << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
1196 DumpB74410240ClassData(caller->GetDeclaringClass());
1197 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
1198 }
1199 }
1200
1201 // Lazily resolve a method for quick. Called by stub code.
1202 extern "C" const void* artQuickResolutionTrampoline(
1203 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
1204 REQUIRES_SHARED(Locks::mutator_lock_) {
1205 // The resolution trampoline stashes the resolved method into the callee-save frame to transport
1206 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
1207 // does not have the same stack layout as the callee-save method).
1208 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
1209 // Start new JNI local reference state
1210 JNIEnvExt* env = self->GetJniEnv();
1211 ScopedObjectAccessUnchecked soa(env);
1212 ScopedJniEnvLocalRefState env_state(env);
1213 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
1214
1215 // Compute details about the called method (avoid GCs)
1216 ClassLinker* linker = Runtime::Current()->GetClassLinker();
1217 InvokeType invoke_type;
1218 MethodReference called_method(nullptr, 0);
1219 const bool called_method_known_on_entry = !called->IsRuntimeMethod();
1220 ArtMethod* caller = nullptr;
1221 if (!called_method_known_on_entry) {
1222 uint32_t dex_pc;
1223 caller = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
1224 called_method.dex_file = caller->GetDexFile();
1225
1226 {
1227 CodeItemInstructionAccessor accessor(caller->DexInstructions());
1228 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1229 const Instruction& instr = accessor.InstructionAt(dex_pc);
1230 Instruction::Code instr_code = instr.Opcode();
1231 bool is_range;
1232 switch (instr_code) {
1233 case Instruction::INVOKE_DIRECT:
1234 invoke_type = kDirect;
1235 is_range = false;
1236 break;
1237 case Instruction::INVOKE_DIRECT_RANGE:
1238 invoke_type = kDirect;
1239 is_range = true;
1240 break;
1241 case Instruction::INVOKE_STATIC:
1242 invoke_type = kStatic;
1243 is_range = false;
1244 break;
1245 case Instruction::INVOKE_STATIC_RANGE:
1246 invoke_type = kStatic;
1247 is_range = true;
1248 break;
1249 case Instruction::INVOKE_SUPER:
1250 invoke_type = kSuper;
1251 is_range = false;
1252 break;
1253 case Instruction::INVOKE_SUPER_RANGE:
1254 invoke_type = kSuper;
1255 is_range = true;
1256 break;
1257 case Instruction::INVOKE_VIRTUAL:
1258 invoke_type = kVirtual;
1259 is_range = false;
1260 break;
1261 case Instruction::INVOKE_VIRTUAL_RANGE:
1262 invoke_type = kVirtual;
1263 is_range = true;
1264 break;
1265 case Instruction::INVOKE_INTERFACE:
1266 invoke_type = kInterface;
1267 is_range = false;
1268 break;
1269 case Instruction::INVOKE_INTERFACE_RANGE:
1270 invoke_type = kInterface;
1271 is_range = true;
1272 break;
1273 default:
1274 DumpB74410240DebugData(sp);
1275 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr);
1276 UNREACHABLE();
1277 }
1278 called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c();
1279 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
1280 << called_method.index;
1281 }
1282 } else {
1283 invoke_type = kStatic;
1284 called_method.dex_file = called->GetDexFile();
1285 called_method.index = called->GetDexMethodIndex();
1286 }
1287 std::string_view shorty =
1288 called_method.dex_file->GetMethodShortyView(called_method.GetMethodId());
1289 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, &soa);
1290 visitor.VisitArguments();
1291 self->EndAssertNoThreadSuspension(old_cause);
1292 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
1293 // Resolve method filling in dex cache.
1294 if (!called_method_known_on_entry) {
1295 StackHandleScope<1> hs(self);
1296 mirror::Object* fake_receiver = nullptr;
1297 HandleWrapper<mirror::Object> h_receiver(
1298 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &fake_receiver));
1299 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1300 called = linker->ResolveMethodWithChecks(called_method.index, caller, invoke_type);
1301 }
1302 const void* code = nullptr;
1303 if (LIKELY(!self->IsExceptionPending())) {
1304 // Incompatible class change should have been handled in resolve method.
1305 CHECK(!called->CheckIncompatibleClassChange(invoke_type))
1306 << called->PrettyMethod() << " " << invoke_type;
1307 if (virtual_or_interface || invoke_type == kSuper) {
1308 // Refine called method based on receiver for kVirtual/kInterface, and
1309 // caller for kSuper.
1310 ArtMethod* orig_called = called;
1311 if (invoke_type == kVirtual) {
1312 CHECK(receiver != nullptr) << invoke_type;
1313 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
1314 } else if (invoke_type == kInterface) {
1315 CHECK(receiver != nullptr) << invoke_type;
1316 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
1317 } else {
1318 DCHECK_EQ(invoke_type, kSuper);
1319 CHECK(caller != nullptr) << invoke_type;
1320 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
1321 caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller);
1322 if (ref_class->IsInterface()) {
1323 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
1324 } else {
1325 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
1326 called->GetMethodIndex(), kRuntimePointerSize);
1327 }
1328 }
1329
1330 CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
1331 << mirror::Object::PrettyTypeOf(receiver) << " "
1332 << invoke_type << " " << orig_called->GetVtableIndex();
1333 }
1334 // Now that we know the actual target, update .bss entry in oat file, if
1335 // any.
1336 if (!called_method_known_on_entry) {
1337 // We only put non copied methods in the BSS. Putting a copy can lead to an
1338 // odd situation where the ArtMethod being executed is unrelated to the
1339 // receiver of the method.
1340 called = called->GetCanonicalMethod();
1341 if (invoke_type == kSuper || invoke_type == kInterface || invoke_type == kVirtual) {
1342 if (called->GetDexFile() == called_method.dex_file) {
1343 called_method.index = called->GetDexMethodIndex();
1344 } else {
1345 called_method.index = called->FindDexMethodIndexInOtherDexFile(
1346 *called_method.dex_file, called_method.index);
1347 DCHECK_NE(called_method.index, dex::kDexNoIndex);
1348 }
1349 }
1350 ArtMethod* outer_method = QuickArgumentVisitor::GetOuterMethod(sp);
1351 MaybeUpdateBssMethodEntry(called, called_method, outer_method);
1352 }
1353
1354 // Static invokes need class initialization check but instance invokes can proceed even if
1355 // the class is erroneous, i.e. in the edge case of escaping instances of erroneous classes.
1356 bool success = true;
1357 if (called->StillNeedsClinitCheck()) {
1358 // Ensure that the called method's class is initialized.
1359 StackHandleScope<1> hs(soa.Self());
1360 Handle<mirror::Class> h_called_class = hs.NewHandle(called->GetDeclaringClass());
1361 success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
1362 }
1363 if (success) {
1364 // When the clinit check is at entry of the AOT/nterp code, we do the clinit check
1365 // before doing the suspend check. To ensure the code sees the latest
1366 // version of the class (the code doesn't do a read barrier to reduce
1367 // size), do a suspend check now.
1368 self->CheckSuspend();
1369 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1370 // Check if we need instrumented code here. Since resolution stubs could suspend, it is
1371 // possible that we instrumented the entry points after we started executing the resolution
1372 // stub.
1373 code = instrumentation->GetMaybeInstrumentedCodeForInvoke(called);
1374 } else {
1375 DCHECK(called->GetDeclaringClass()->IsErroneous());
1376 DCHECK(self->IsExceptionPending());
1377 }
1378 }
1379 CHECK_EQ(code == nullptr, self->IsExceptionPending());
1380   // Fix up any locally saved objects that may have moved during a GC.
1381 visitor.FixupReferences();
1382   // Store the called method in the callee-save frame so it becomes the first argument to the quick method.
1383 *sp = called;
1384
1385 return code;
1386 }
1387
1388 /*
1389 * This class uses a couple of observations to unite the different calling conventions through
1390 * a few constants.
1391 *
1392 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1393 * possible alignment.
1394 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
1395  *     types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to
1396  *     denote when we have to split a value across registers.
1397  * 3) The only soft-float architecture, Arm, is 32b, so no widening is needed for floats
1398 * and we can use Int handling directly.
1399 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
1400 * necessary when widening. Also, widening of Ints will take place implicitly, and the
1401 * extension should be compatible with Aarch64, which mandates copying the available bits
1402 * into LSB and leaving the rest unspecified.
1403 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
1404 * the stack.
1405 * 6) There is only little endian.
1406 *
1407 *
1408 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1409 * follows:
1410 *
1411 * void PushGpr(uintptr_t): Add a value for the next GPR
1412 *
1413  * void PushFpr4(float): Add a value for the next FPR of size 32b. Only called if we need
1414  *                       padding, that is, when the architecture is 32b and aligns 64b values.
1415 *
1416 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
1417  *                          split this if necessary. The state machine will already have applied
1418  *                          any required alignment.
1419 *
1420 * void PushStack(uintptr_t): Push a value to the stack.
1421 */
1422 template<class T> class BuildNativeCallFrameStateMachine {
1423 public:
1424 static constexpr bool kNaNBoxing = QuickArgumentVisitor::NaNBoxing();
1425 #if defined(__arm__)
1426 static constexpr bool kNativeSoftFloatAbi = true;
1427 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1428 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1429 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1430
1431 static constexpr size_t kRegistersNeededForLong = 2;
1432 static constexpr size_t kRegistersNeededForDouble = 2;
1433 static constexpr bool kMultiRegistersAligned = true;
1434 static constexpr bool kMultiGPRegistersWidened = false;
1435 static constexpr bool kAlignLongOnStack = true;
1436 static constexpr bool kAlignDoubleOnStack = true;
1437 #elif defined(__aarch64__)
1438 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1439 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1440 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
1441 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1442
1443 static constexpr size_t kRegistersNeededForLong = 1;
1444 static constexpr size_t kRegistersNeededForDouble = 1;
1445 static constexpr bool kMultiRegistersAligned = false;
1446 static constexpr bool kMultiGPRegistersWidened = false;
1447 static constexpr bool kAlignLongOnStack = false;
1448 static constexpr bool kAlignDoubleOnStack = false;
1449 #elif defined(__riscv)
1450 static constexpr bool kNativeSoftFloatAbi = false;
1451 static constexpr bool kNativeSoftFloatAfterHardFloat = true;
1452 static constexpr size_t kNumNativeGprArgs = 8;
1453 static constexpr size_t kNumNativeFprArgs = 8;
1454
1455 static constexpr size_t kRegistersNeededForLong = 1;
1456 static constexpr size_t kRegistersNeededForDouble = 1;
1457 static constexpr bool kMultiRegistersAligned = false;
1458 static constexpr bool kMultiGPRegistersWidened = true;
1459 static constexpr bool kAlignLongOnStack = false;
1460 static constexpr bool kAlignDoubleOnStack = false;
1461 #elif defined(__i386__)
1462 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1463 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1464 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs.
1465 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1466
1467 static constexpr size_t kRegistersNeededForLong = 2;
1468 static constexpr size_t kRegistersNeededForDouble = 2;
1469 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
1470 static constexpr bool kMultiGPRegistersWidened = false;
1471 static constexpr bool kAlignLongOnStack = false;
1472 static constexpr bool kAlignDoubleOnStack = false;
1473 #elif defined(__x86_64__)
1474 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1475 static constexpr bool kNativeSoftFloatAfterHardFloat = false;
1476 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1477 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1478
1479 static constexpr size_t kRegistersNeededForLong = 1;
1480 static constexpr size_t kRegistersNeededForDouble = 1;
1481 static constexpr bool kMultiRegistersAligned = false;
1482 static constexpr bool kMultiGPRegistersWidened = false;
1483 static constexpr bool kAlignLongOnStack = false;
1484 static constexpr bool kAlignDoubleOnStack = false;
1485 #else
1486 #error "Unsupported architecture"
1487 #endif
1488
1489 public:
1490   explicit BuildNativeCallFrameStateMachine(T* delegate)
1491 : gpr_index_(kNumNativeGprArgs),
1492 fpr_index_(kNumNativeFprArgs),
1493 stack_entries_(0),
1494 delegate_(delegate) {
1495 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1496 // the next register is even; counting down is just to make the compiler happy...
1497 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1498 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1499 }
1500
1501   virtual ~BuildNativeCallFrameStateMachine() {}
1502
1503   bool HavePointerGpr() const {
1504 return gpr_index_ > 0;
1505 }
1506
1507   void AdvancePointer(const void* val) {
1508 if (HavePointerGpr()) {
1509 gpr_index_--;
1510 PushGpr(reinterpret_cast<uintptr_t>(val));
1511 } else {
1512 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b
1513 PushStack(reinterpret_cast<uintptr_t>(val));
1514 gpr_index_ = 0;
1515 }
1516 }
1517
1518   bool HaveIntGpr() const {
1519 return gpr_index_ > 0;
1520 }
1521
1522   void AdvanceInt(uint32_t val) {
1523 if (HaveIntGpr()) {
1524 gpr_index_--;
1525 if (kMultiGPRegistersWidened) {
1526 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1527 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1528 } else {
1529 PushGpr(val);
1530 }
1531 } else {
1532 stack_entries_++;
1533 if (kMultiGPRegistersWidened) {
1534 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1535 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1536 } else {
1537 PushStack(val);
1538 }
1539 gpr_index_ = 0;
1540 }
1541 }
1542
1543   bool HaveLongGpr() const {
1544 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1545 }
1546
1547   bool LongGprNeedsPadding() const {
1548 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1549 kAlignLongOnStack && // and when it needs alignment
1550 (gpr_index_ & 1) == 1; // counter is odd, see constructor
1551 }
1552
1553   bool LongStackNeedsPadding() const {
1554 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1555 kAlignLongOnStack && // and when it needs 8B alignment
1556 (stack_entries_ & 1) == 1; // counter is odd
1557 }
1558
1559   void AdvanceLong(uint64_t val) {
1560 if (HaveLongGpr()) {
1561 if (LongGprNeedsPadding()) {
1562 PushGpr(0);
1563 gpr_index_--;
1564 }
1565 if (kRegistersNeededForLong == 1) {
1566 PushGpr(static_cast<uintptr_t>(val));
1567 } else {
1568 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1569 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1570 }
1571 gpr_index_ -= kRegistersNeededForLong;
1572 } else {
1573 if (LongStackNeedsPadding()) {
1574 PushStack(0);
1575 stack_entries_++;
1576 }
1577 if (kRegistersNeededForLong == 1) {
1578 PushStack(static_cast<uintptr_t>(val));
1579 stack_entries_++;
1580 } else {
1581 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1582 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1583 stack_entries_ += 2;
1584 }
1585 gpr_index_ = 0;
1586 }
1587 }
1588
1589   bool HaveFloatFpr() const {
1590 return fpr_index_ > 0;
1591 }
1592
1593   void AdvanceFloat(uint32_t val) {
1594 if (kNativeSoftFloatAbi) {
1595 AdvanceInt(val);
1596 } else if (HaveFloatFpr()) {
1597 fpr_index_--;
1598 if (kRegistersNeededForDouble == 1) {
1599 if (kNaNBoxing) {
1600 // NaN boxing: no widening, just use the bits, but reset upper bits to 1s.
1601 // See e.g. RISC-V manual, D extension, section "NaN Boxing of Narrower Values".
1602 PushFpr8(UINT64_C(0xFFFFFFFF00000000) | static_cast<uint64_t>(val));
1603 } else {
1604 // No widening, just use the bits.
1605 PushFpr8(static_cast<uint64_t>(val));
1606 }
1607 } else {
1608 PushFpr4(val);
1609 }
1610 } else if (kNativeSoftFloatAfterHardFloat) {
1611 // After using FP arg registers, pass FP args in general purpose registers or on the stack.
1612 AdvanceInt(val);
1613 } else {
1614 stack_entries_++;
1615 PushStack(static_cast<uintptr_t>(val));
1616 fpr_index_ = 0;
1617 }
1618 }
1619
1620   bool HaveDoubleFpr() const {
1621 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1622 }
1623
1624   bool DoubleFprNeedsPadding() const {
1625 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1626 kAlignDoubleOnStack && // and when it needs alignment
1627 (fpr_index_ & 1) == 1; // counter is odd, see constructor
1628 }
1629
1630   bool DoubleStackNeedsPadding() const {
1631 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1632 kAlignDoubleOnStack && // and when it needs 8B alignment
1633 (stack_entries_ & 1) == 1; // counter is odd
1634 }
1635
1636   void AdvanceDouble(uint64_t val) {
1637 if (kNativeSoftFloatAbi) {
1638 AdvanceLong(val);
1639 } else if (HaveDoubleFpr()) {
1640 if (DoubleFprNeedsPadding()) {
1641 PushFpr4(0);
1642 fpr_index_--;
1643 }
1644 PushFpr8(val);
1645 fpr_index_ -= kRegistersNeededForDouble;
1646 } else if (kNativeSoftFloatAfterHardFloat) {
1647 // After using FP arg registers, pass FP args in general purpose registers or on the stack.
1648 AdvanceLong(val);
1649 } else {
1650 if (DoubleStackNeedsPadding()) {
1651 PushStack(0);
1652 stack_entries_++;
1653 }
1654 if (kRegistersNeededForDouble == 1) {
1655 PushStack(static_cast<uintptr_t>(val));
1656 stack_entries_++;
1657 } else {
1658 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1659 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1660 stack_entries_ += 2;
1661 }
1662 fpr_index_ = 0;
1663 }
1664 }
1665
1666   uint32_t GetStackEntries() const {
1667 return stack_entries_;
1668 }
1669
1670   uint32_t GetNumberOfUsedGprs() const {
1671 return kNumNativeGprArgs - gpr_index_;
1672 }
1673
1674   uint32_t GetNumberOfUsedFprs() const {
1675 return kNumNativeFprArgs - fpr_index_;
1676 }
1677
1678 private:
1679   void PushGpr(uintptr_t val) {
1680 delegate_->PushGpr(val);
1681 }
1682   void PushFpr4(float val) {
1683 delegate_->PushFpr4(val);
1684 }
1685   void PushFpr8(uint64_t val) {
1686 delegate_->PushFpr8(val);
1687 }
1688   void PushStack(uintptr_t val) {
1689 delegate_->PushStack(val);
1690 }
1691
1692 uint32_t gpr_index_; // Number of free GPRs
1693 uint32_t fpr_index_; // Number of free FPRs
1694 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
1695 // extended
1696 T* const delegate_; // What Push implementation gets called
1697 };
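
// Illustrative sketch, not part of the runtime: a minimal delegate satisfying the Push* interface
// described in the comment above BuildNativeCallFrameStateMachine. It only counts where each value
// would land; FillNativeCall further below is a real delegate that writes the values out. The name
// CountingNativeCallDelegate is hypothetical.
class CountingNativeCallDelegate {
 public:
  void PushGpr(uintptr_t) { gprs_++; }
  void PushFpr4(float) { fprs_++; }
  void PushFpr8(uint64_t) { fprs_++; }
  void PushStack(uintptr_t) { stack_slots_++; }

  size_t gprs_ = 0;
  size_t fprs_ = 0;
  size_t stack_slots_ = 0;
};

// Example use, assuming a hypothetical native method taking (jint, jdouble, jlong):
//   CountingNativeCallDelegate counter;
//   BuildNativeCallFrameStateMachine<CountingNativeCallDelegate> sm(&counter);
//   sm.AdvanceInt(0);     // 'I'
//   sm.AdvanceDouble(0);  // 'D'
//   sm.AdvanceLong(0);    // 'J'
//   // counter.gprs_ / counter.fprs_ / counter.stack_slots_ now reflect the ABI constants above.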
1698
1699 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
1700 // in subclasses.
1701 //
1702 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
1703 // them with handles.
1704 class ComputeNativeCallFrameSize {
1705 public:
1706   ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1707
1708   virtual ~ComputeNativeCallFrameSize() {}
1709
1710   uint32_t GetStackSize() const {
1711 return num_stack_entries_ * sizeof(uintptr_t);
1712 }
1713
1714   uint8_t* LayoutStackArgs(uint8_t* sp8) const {
1715 sp8 -= GetStackSize();
1716 // Align by kStackAlignment; it is at least as strict as native stack alignment.
1717 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1718 return sp8;
1719 }
1720
1721   virtual void WalkHeader(
1722 [[maybe_unused]] BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
1723 REQUIRES_SHARED(Locks::mutator_lock_) {}
1724
1725   void Walk(std::string_view shorty) REQUIRES_SHARED(Locks::mutator_lock_) {
1726 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1727
1728 WalkHeader(&sm);
1729
1730 for (char c : shorty.substr(1u)) {
1731 Primitive::Type cur_type_ = Primitive::GetType(c);
1732 switch (cur_type_) {
1733 case Primitive::kPrimNot:
1734 sm.AdvancePointer(nullptr);
1735 break;
1736 case Primitive::kPrimBoolean:
1737 case Primitive::kPrimByte:
1738 case Primitive::kPrimChar:
1739 case Primitive::kPrimShort:
1740 case Primitive::kPrimInt:
1741 sm.AdvanceInt(0);
1742 break;
1743 case Primitive::kPrimFloat:
1744 sm.AdvanceFloat(0);
1745 break;
1746 case Primitive::kPrimDouble:
1747 sm.AdvanceDouble(0);
1748 break;
1749 case Primitive::kPrimLong:
1750 sm.AdvanceLong(0);
1751 break;
1752 default:
1753 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1754 UNREACHABLE();
1755 }
1756 }
1757
1758 num_stack_entries_ = sm.GetStackEntries();
1759 }
1760
1761   void PushGpr(uintptr_t /* val */) {
1762 // not optimizing registers, yet
1763 }
1764
1765   void PushFpr4(float /* val */) {
1766 // not optimizing registers, yet
1767 }
1768
1769   void PushFpr8(uint64_t /* val */) {
1770 // not optimizing registers, yet
1771 }
1772
1773   void PushStack(uintptr_t /* val */) {
1774 // counting is already done in the superclass
1775 }
1776
1777 protected:
1778 uint32_t num_stack_entries_;
1779 };
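
// Illustrative sketch, not used by the runtime: driving the size computation above with a shorty.
// The shorty "DIJ" (double f(int, long)) and the function name are hypothetical; the first shorty
// character is the return type and is skipped by Walk().
[[maybe_unused]] static size_t ExampleNativeStackSizeForShorty()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ComputeNativeCallFrameSize fsc;
  fsc.Walk("DIJ");            // An 'L' argument would instead advance a pointer (handle) slot.
  return fsc.GetStackSize();  // Bytes of out-args space needed beyond the argument registers.
}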
1780
1781 class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
1782 public:
1783   explicit ComputeGenericJniFrameSize(bool critical_native)
1784 : critical_native_(critical_native) {}
1785
1786   uintptr_t* ComputeLayout(ArtMethod** managed_sp, std::string_view shorty)
1787 REQUIRES_SHARED(Locks::mutator_lock_) {
1788 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
1789
1790 Walk(shorty);
1791
1792 // Add space for cookie.
1793 DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
1794 static_assert(sizeof(uintptr_t) >= sizeof(jni::LRTSegmentState));
1795 uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
1796
1797 // Layout stack arguments.
1798 sp8 = LayoutStackArgs(sp8);
1799
1800 // Return the new bottom.
1801 DCHECK_ALIGNED(sp8, sizeof(uintptr_t));
1802 return reinterpret_cast<uintptr_t*>(sp8);
1803 }
1804
1805   static uintptr_t* GetStartGprRegs(uintptr_t* reserved_area) {
1806 return reserved_area;
1807 }
1808
1809   static uint32_t* GetStartFprRegs(uintptr_t* reserved_area) {
1810 constexpr size_t num_gprs =
1811 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1812 return reinterpret_cast<uint32_t*>(GetStartGprRegs(reserved_area) + num_gprs);
1813 }
1814
1815   static uintptr_t* GetHiddenArgSlot(uintptr_t* reserved_area) {
1816 // Note: `num_fprs` is 0 on architectures where sizeof(uintptr_t) does not match the
1817 // FP register size (it is actually 0 on all supported 32-bit architectures).
1818 constexpr size_t num_fprs =
1819 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1820 return reinterpret_cast<uintptr_t*>(GetStartFprRegs(reserved_area)) + num_fprs;
1821 }
1822
1823   static uintptr_t* GetOutArgsSpSlot(uintptr_t* reserved_area) {
1824 return GetHiddenArgSlot(reserved_area) + 1;
1825 }
1826
1827 // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1828 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
1829 REQUIRES_SHARED(Locks::mutator_lock_);
1830
1831 private:
1832 const bool critical_native_;
1833 };
1834
1835 void ComputeGenericJniFrameSize::WalkHeader(
1836 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1837 // First 2 parameters are always excluded for @CriticalNative.
1838 if (UNLIKELY(critical_native_)) {
1839 return;
1840 }
1841
1842 // JNIEnv
1843 sm->AdvancePointer(nullptr);
1844
1845 // Class object or this as first argument
1846 sm->AdvancePointer(nullptr);
1847 }
1848
1849 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
1850 // the template requirements of BuildNativeCallFrameStateMachine.
1851 class FillNativeCall {
1852 public:
1853   FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1854 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1855
1856   virtual ~FillNativeCall() {}
1857
1858   void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1859 cur_gpr_reg_ = gpr_regs;
1860 cur_fpr_reg_ = fpr_regs;
1861 cur_stack_arg_ = stack_args;
1862 }
1863
1864   void PushGpr(uintptr_t val) {
1865 *cur_gpr_reg_ = val;
1866 cur_gpr_reg_++;
1867 }
1868
1869   void PushFpr4(float val) {
1870 *cur_fpr_reg_ = val;
1871 cur_fpr_reg_++;
1872 }
1873
1874   void PushFpr8(uint64_t val) {
1875 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1876 *tmp = val;
1877 cur_fpr_reg_ += 2;
1878 }
1879
1880   void PushStack(uintptr_t val) {
1881 *cur_stack_arg_ = val;
1882 cur_stack_arg_++;
1883 }
1884
1885 private:
1886 uintptr_t* cur_gpr_reg_;
1887 uint32_t* cur_fpr_reg_;
1888 uintptr_t* cur_stack_arg_;
1889 };
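
// Illustrative sketch, not used by the runtime: pairing FillNativeCall with the state machine to
// spill values into caller-provided regions. The array sizes, values and function name below are
// arbitrary and hypothetical.
[[maybe_unused]] static void ExampleFillNativeCallRegions() {
  uintptr_t gprs[8] = {};
  alignas(8) uint32_t fprs[16] = {};  // Written in 8-byte units by PushFpr8(), hence the alignment.
  uintptr_t stack[8] = {};
  FillNativeCall filler(gprs, fprs, stack);
  BuildNativeCallFrameStateMachine<FillNativeCall> sm(&filler);
  sm.AdvancePointer(nullptr);                         // e.g. a JNIEnv* placeholder.
  sm.AdvanceInt(42);
  sm.AdvanceDouble(bit_cast<uint64_t, double>(1.0));
  // gprs/fprs/stack now hold the values laid out according to the native ABI constants above.
}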
1890
1891 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
1892 // of transitioning into native code.
1893 class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
1894 public:
1895   BuildGenericJniFrameVisitor(Thread* self,
1896 bool is_static,
1897 bool critical_native,
1898 std::string_view shorty,
1899 ArtMethod** managed_sp,
1900 uintptr_t* reserved_area)
1901 : QuickArgumentVisitor(managed_sp, is_static, shorty),
1902 jni_call_(nullptr, nullptr, nullptr),
1903 sm_(&jni_call_),
1904 current_vreg_(nullptr) {
1905 DCHECK_ALIGNED(managed_sp, kStackAlignment);
1906 DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
1907
1908 ComputeGenericJniFrameSize fsc(critical_native);
1909 uintptr_t* out_args_sp = fsc.ComputeLayout(managed_sp, shorty);
1910
1911 // Store hidden argument for @CriticalNative.
1912 uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
1913 constexpr uintptr_t kGenericJniTag = 1u;
1914 ArtMethod* method = *managed_sp;
1915 *hidden_arg_slot = critical_native ? (reinterpret_cast<uintptr_t>(method) | kGenericJniTag)
1916 : 0xebad6a89u; // Bad value.
1917
1918 // Set out args SP.
1919 uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
1920 *out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
1921
1922 // Prepare vreg pointer for spilling references.
1923 static constexpr size_t frame_size =
1924 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
1925 current_vreg_ = reinterpret_cast<uint32_t*>(
1926 reinterpret_cast<uint8_t*>(managed_sp) + frame_size + sizeof(ArtMethod*));
1927
1928 jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
1929 fsc.GetStartFprRegs(reserved_area),
1930 out_args_sp);
1931
1932 // First 2 parameters are always excluded for CriticalNative methods.
1933 if (LIKELY(!critical_native)) {
1934       // The JNI environment is always the first argument.
1935 sm_.AdvancePointer(self->GetJniEnv());
1936
1937 if (is_static) {
1938 // The `jclass` is a pointer to the method's declaring class.
1939 // The declaring class must be marked.
1940 auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
1941 method->GetDeclaringClassAddressWithoutBarrier());
1942 if (gUseReadBarrier) {
1943 artJniReadBarrier(method);
1944 }
1945 sm_.AdvancePointer(declaring_class);
1946 } // else "this" reference is already handled by QuickArgumentVisitor.
1947 }
1948 }
1949
1950 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
1951
1952 private:
1953 FillNativeCall jni_call_;
1954 BuildNativeCallFrameStateMachine<FillNativeCall> sm_;
1955
1956 // Pointer to the current vreg in caller's reserved out vreg area.
1957 // Used for spilling reference arguments.
1958 uint32_t* current_vreg_;
1959
1960 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1961 };
1962
1963 void BuildGenericJniFrameVisitor::Visit() {
1964 Primitive::Type type = GetParamPrimitiveType();
1965 switch (type) {
1966 case Primitive::kPrimLong: {
1967 jlong long_arg;
1968 if (IsSplitLongOrDouble()) {
1969 long_arg = ReadSplitLongParam();
1970 } else {
1971 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1972 }
1973 sm_.AdvanceLong(long_arg);
1974 current_vreg_ += 2u;
1975 break;
1976 }
1977 case Primitive::kPrimDouble: {
1978 uint64_t double_arg;
1979 if (IsSplitLongOrDouble()) {
1980         // Read the raw bits so that we don't cast to a double.
1981 double_arg = ReadSplitLongParam();
1982 } else {
1983 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1984 }
1985 sm_.AdvanceDouble(double_arg);
1986 current_vreg_ += 2u;
1987 break;
1988 }
1989 case Primitive::kPrimNot: {
1990 mirror::Object* obj =
1991 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress())->AsMirrorPtr();
1992 StackReference<mirror::Object>* spill_ref =
1993 reinterpret_cast<StackReference<mirror::Object>*>(current_vreg_);
1994 spill_ref->Assign(obj);
1995 sm_.AdvancePointer(obj != nullptr ? spill_ref : nullptr);
1996 current_vreg_ += 1u;
1997 break;
1998 }
1999 case Primitive::kPrimFloat:
2000 sm_.AdvanceFloat(*reinterpret_cast<uint32_t*>(GetParamAddress()));
2001 current_vreg_ += 1u;
2002 break;
2003 case Primitive::kPrimBoolean: // Fall-through.
2004 case Primitive::kPrimByte: // Fall-through.
2005 case Primitive::kPrimChar: // Fall-through.
2006 case Primitive::kPrimShort: // Fall-through.
2007 case Primitive::kPrimInt: // Fall-through.
2008 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
2009 current_vreg_ += 1u;
2010 break;
2011 case Primitive::kPrimVoid:
2012 LOG(FATAL) << "UNREACHABLE";
2013 UNREACHABLE();
2014 }
2015 }
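
// Illustrative sketch of the reference-argument convention implemented in Visit() above: JNI
// receives objects indirectly, so the reference is spilled into the reserved vreg area and the
// address of that slot (or null for a null reference) is what the native code sees. The function
// name is hypothetical.
[[maybe_unused]] static uintptr_t ExampleJniReferenceArg(StackReference<mirror::Object>* spill_slot,
                                                         mirror::Object* obj)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  spill_slot->Assign(obj);
  return obj != nullptr ? reinterpret_cast<uintptr_t>(spill_slot) : static_cast<uintptr_t>(0);
}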
2016
2017 /*
2018 * Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
2019 *
2020 * On entry, the stack has a standard callee-save frame above `managed_sp`,
2021 * and the reserved area below it. Starting below `managed_sp`, we reserve space
2022 * for local reference cookie (not present for @CriticalNative), HandleScope
2023 * (not present for @CriticalNative) and stack args (if args do not fit into
2024 * registers). At the bottom of the reserved area, there is space for register
2025 * arguments, hidden arg (for @CriticalNative) and the SP for the native call
2026 * (i.e. pointer to the stack args area), which the calling stub shall load
2027 * to perform the native call. We fill all these fields, perform class init
2028 * check (for static methods) and/or locking (for synchronized methods) if
2029 * needed and return to the stub.
2030 *
2031 * The return value is the pointer to the native code, null on failure.
2032 *
2033 * NO_THREAD_SAFETY_ANALYSIS: Depending on the use case, the trampoline may
2034 * or may not lock a synchronization object and transition out of Runnable.
2035 */
2036 extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
2037 ArtMethod** managed_sp,
2038 uintptr_t* reserved_area)
2039 REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
2040 // Note: We cannot walk the stack properly until fixed up below.
2041 ArtMethod* called = *managed_sp;
2042 DCHECK(called->IsNative()) << called->PrettyMethod(true);
2043 Runtime* runtime = Runtime::Current();
2044 std::string_view shorty = called->GetShortyView();
2045 bool critical_native = called->IsCriticalNative();
2046 bool fast_native = called->IsFastNative();
2047 bool normal_native = !critical_native && !fast_native;
2048
2049 // Run the visitor and update sp.
2050 BuildGenericJniFrameVisitor visitor(self,
2051 called->IsStatic(),
2052 critical_native,
2053 shorty,
2054 managed_sp,
2055 reserved_area);
2056 {
2057 ScopedAssertNoThreadSuspension sants(__FUNCTION__);
2058 visitor.VisitArguments();
2059 }
2060
2061 // Fix up managed-stack things in Thread. After this we can walk the stack.
2062 self->SetTopOfStackGenericJniTagged(managed_sp);
2063
2064 self->VerifyStack();
2065
2066 // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use.
2067 jit::Jit* jit = runtime->GetJit();
2068 if (jit != nullptr) {
2069 jit->MethodEntered(self, called);
2070 }
2071
2072 // We can set the entrypoint of a native method to generic JNI even when the
2073 // class hasn't been initialized, so we need to do the initialization check
2074 // before invoking the native code.
2075 if (called->StillNeedsClinitCheck()) {
2076 // Ensure static method's class is initialized.
2077 StackHandleScope<1> hs(self);
2078 Handle<mirror::Class> h_class = hs.NewHandle(called->GetDeclaringClass());
2079 if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
2080 DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
2081 return nullptr; // Report error.
2082 }
2083 }
2084
2085 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2086 if (UNLIKELY(instr->HasMethodEntryListeners())) {
2087 instr->MethodEnterEvent(self, called);
2088 if (self->IsExceptionPending()) {
2089 return nullptr;
2090 }
2091 }
2092
2093 // Skip calling `artJniMethodStart()` for @CriticalNative and @FastNative.
2094 if (LIKELY(normal_native)) {
2095 // Start JNI.
2096 if (called->IsSynchronized()) {
2097 ObjPtr<mirror::Object> lock = GetGenericJniSynchronizationObject(self, called);
2098 DCHECK(lock != nullptr);
2099 lock->MonitorEnter(self);
2100 if (self->IsExceptionPending()) {
2101 return nullptr; // Report error.
2102 }
2103 }
2104 if (UNLIKELY(self->ReadFlag(ThreadFlag::kMonitorJniEntryExit))) {
2105 artJniMonitoredMethodStart(self);
2106 } else {
2107 artJniMethodStart(self);
2108 }
2109 } else {
2110 DCHECK(!called->IsSynchronized())
2111         << "@FastNative/@CriticalNative methods must not be synchronized";
2112 }
2113
2114 // Skip pushing LRT frame for @CriticalNative.
2115 if (LIKELY(!critical_native)) {
2116 // Push local reference frame.
2117 JNIEnvExt* env = self->GetJniEnv();
2118 DCHECK(env != nullptr);
2119 uint32_t cookie = bit_cast<uint32_t>(env->PushLocalReferenceFrame());
2120
2121 // Save the cookie on the stack.
2122 uint32_t* sp32 = reinterpret_cast<uint32_t*>(managed_sp);
2123 *(sp32 - 1) = cookie;
2124 }
2125
2126 // Retrieve the stored native code.
2127 // Note that it may point to the lookup stub or trampoline.
2128 // FIXME: This is broken for @CriticalNative as the art_jni_dlsym_lookup_stub
2129 // does not handle that case. Calls from compiled stubs are also broken.
2130 void const* nativeCode = called->GetEntryPointFromJni();
2131
2132 VLOG(third_party_jni) << "GenericJNI: "
2133 << called->PrettyMethod()
2134 << " -> "
2135 << std::hex << reinterpret_cast<uintptr_t>(nativeCode);
2136
2137 // Return native code.
2138 return nativeCode;
2139 }
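
// Illustrative sketch, not used by the runtime: how a calling stub could locate the pieces of the
// reserved area that artQuickGenericJniTrampoline() filled above, using the layout accessors of
// ComputeGenericJniFrameSize. The function name is hypothetical.
[[maybe_unused]] static uintptr_t ExampleReadReservedArea(uintptr_t* reserved_area) {
  // Register argument blocks, filled via FillNativeCall.
  [[maybe_unused]] uintptr_t* gpr_args = ComputeGenericJniFrameSize::GetStartGprRegs(reserved_area);
  [[maybe_unused]] uint32_t* fpr_args = ComputeGenericJniFrameSize::GetStartFprRegs(reserved_area);
  // Tagged ArtMethod* for @CriticalNative, a bad-value marker otherwise.
  [[maybe_unused]] uintptr_t hidden_arg = *ComputeGenericJniFrameSize::GetHiddenArgSlot(reserved_area);
  // The SP to install before jumping to the native code; it points at the stack args.
  return *ComputeGenericJniFrameSize::GetOutArgsSpSlot(reserved_area);
}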
2140
2141 // Defined in quick_jni_entrypoints.cc.
2142 extern uint64_t GenericJniMethodEnd(Thread* self,
2143 uint32_t saved_local_ref_cookie,
2144 jvalue result,
2145 uint64_t result_f,
2146 ArtMethod* called);
2147
2148 /*
2149 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
2150 * unlocking.
2151 */
2152 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2153 jvalue result,
2154 uint64_t result_f) {
2155 // We're here just back from a native call. We don't have the shared mutator lock at this point
2156 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
2157 // anything that requires a mutator lock before that would cause problems as GC may have the
2158 // exclusive mutator lock and may be moving objects, etc.
2159 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2160 DCHECK(self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
2161 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2162 ArtMethod* called = *sp;
2163 uint32_t cookie = *(sp32 - 1);
2164 return GenericJniMethodEnd(self, cookie, result, result_f, called);
2165 }
2166
2167 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2168 // for the method pointer.
2169 //
2170 // It is valid to use this, as at the usage points here (returns from C functions) we are assumed
2171 // to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
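
// Illustrative sketch, conceptual only and not ART's actual TwoWordReturn definition: on a 32-bit
// target both words fit into a single 64-bit scalar return, with the code pointer in the high word
// and the method pointer in the low word, matching the convention described above.
[[maybe_unused]] static uint64_t ExamplePackTwoWords(uint32_t code, uint32_t method) {
  return (static_cast<uint64_t>(code) << 32) | method;
}
[[maybe_unused]] static uint32_t ExampleUnpackCodeWord(uint64_t two_words) {
  return static_cast<uint32_t>(two_words >> 32);
}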
2172
2173 template <InvokeType type>
2174 static TwoWordReturn artInvokeCommon(uint32_t method_idx,
2175 ObjPtr<mirror::Object> this_object,
2176 Thread* self,
2177 ArtMethod** sp) {
2178 ScopedQuickEntrypointChecks sqec(self);
2179 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2180 uint32_t dex_pc;
2181 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2182 CodeItemInstructionAccessor accessor(caller_method->DexInstructions());
2183 DCHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
2184 const Instruction& instr = accessor.InstructionAt(dex_pc);
2185 bool string_init = false;
2186 ArtMethod* method = FindMethodToCall<type>(
2187 self, caller_method, &this_object, instr, /* only_lookup_tls_cache= */ true, &string_init);
2188
2189 if (UNLIKELY(method == nullptr)) {
2190 if (self->IsExceptionPending()) {
2191 // Return a failure if the first lookup threw an exception.
2192 return GetTwoWordFailureValue(); // Failure.
2193 }
2194 const DexFile* dex_file = caller_method->GetDexFile();
2195 std::string_view shorty =
2196 dex_file->GetMethodShortyView(dex_file->GetMethodId(method_idx));
2197 {
2198 // Remember the args in case a GC happens in FindMethodToCall.
2199 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2200 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, &soa);
2201 visitor.VisitArguments();
2202
2203 method = FindMethodToCall<type>(self,
2204 caller_method,
2205 &this_object,
2206 instr,
2207 /* only_lookup_tls_cache= */ false,
2208 &string_init);
2209
2210 visitor.FixupReferences();
2211 }
2212
2213 if (UNLIKELY(method == nullptr)) {
2214 CHECK(self->IsExceptionPending());
2215 return GetTwoWordFailureValue(); // Failure.
2216 }
2217 }
2218 DCHECK(!self->IsExceptionPending());
2219 const void* code = method->GetEntryPointFromQuickCompiledCode();
2220
2221 // When we return, the caller will branch to this address, so it had better not be 0!
2222 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2223 << " location: "
2224 << method->GetDexFile()->GetLocation();
2225
2226 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2227 reinterpret_cast<uintptr_t>(method));
2228 }
2229
2230 // Explicit artInvokeCommon template function declarations to please analysis tool.
2231 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type) \
2232 template REQUIRES_SHARED(Locks::mutator_lock_) \
2233 TwoWordReturn artInvokeCommon<type>( \
2234       uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
2235
2236 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual);
2237 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface);
2238 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect);
2239 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic);
2240 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper);
2241 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2242
2243 // See comments in runtime_support_asm.S
2244 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2245 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2246 REQUIRES_SHARED(Locks::mutator_lock_) {
2247 return artInvokeCommon<kInterface>(method_idx, this_object, self, sp);
2248 }
2249
2250 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2251 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2252 REQUIRES_SHARED(Locks::mutator_lock_) {
2253 return artInvokeCommon<kDirect>(method_idx, this_object, self, sp);
2254 }
2255
2256 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2257 uint32_t method_idx, [[maybe_unused]] mirror::Object* this_object, Thread* self, ArtMethod** sp)
2258 REQUIRES_SHARED(Locks::mutator_lock_) {
2259 // For static, this_object is not required and may be random garbage. Don't pass it down so that
2260   // it doesn't trip the ObjPtr alignment check.
2261 return artInvokeCommon<kStatic>(method_idx, nullptr, self, sp);
2262 }
2263
2264 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2265 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2266 REQUIRES_SHARED(Locks::mutator_lock_) {
2267 return artInvokeCommon<kSuper>(method_idx, this_object, self, sp);
2268 }
2269
2270 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2271 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2272 REQUIRES_SHARED(Locks::mutator_lock_) {
2273 return artInvokeCommon<kVirtual>(method_idx, this_object, self, sp);
2274 }
2275
2276 // Determine target of interface dispatch. The interface method and this object are known non-null.
2277 // The interface method is the method returned by the dex cache in the conflict trampoline.
2278 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
2279 mirror::Object* raw_this_object,
2280 Thread* self,
2281 ArtMethod** sp)
2282 REQUIRES_SHARED(Locks::mutator_lock_) {
2283 ScopedQuickEntrypointChecks sqec(self);
2284
2285 Runtime* runtime = Runtime::Current();
2286 bool resolve_method = ((interface_method == nullptr) || interface_method->IsRuntimeMethod());
2287 if (UNLIKELY(resolve_method)) {
2288 // The interface method is unresolved, so resolve it in the dex file of the caller.
2289 // Fetch the dex_method_idx of the target interface method from the caller.
2290 StackHandleScope<1> hs(self);
2291 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
2292 uint32_t dex_pc;
2293 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2294 uint32_t dex_method_idx;
2295 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
2296 Instruction::Code instr_code = instr.Opcode();
2297 DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2298 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2299 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr);
2300 if (instr_code == Instruction::INVOKE_INTERFACE) {
2301 dex_method_idx = instr.VRegB_35c();
2302 } else {
2303 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2304 dex_method_idx = instr.VRegB_3rc();
2305 }
2306
2307 const DexFile& dex_file = *caller_method->GetDexFile();
2308 std::string_view shorty =
2309 dex_file.GetMethodShortyView(dex_file.GetMethodId(dex_method_idx));
2310 {
2311 // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
2312 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2313 RememberForGcArgumentVisitor visitor(sp, false, shorty, &soa);
2314 visitor.VisitArguments();
2315 ClassLinker* class_linker = runtime->GetClassLinker();
2316 interface_method = class_linker->ResolveMethodId(dex_method_idx, caller_method);
2317 visitor.FixupReferences();
2318 }
2319
2320 if (UNLIKELY(interface_method == nullptr)) {
2321 CHECK(self->IsExceptionPending());
2322 return GetTwoWordFailureValue(); // Failure.
2323 }
2324 ArtMethod* outer_method = QuickArgumentVisitor::GetOuterMethod(sp);
2325 MaybeUpdateBssMethodEntry(
2326 interface_method, MethodReference(&dex_file, dex_method_idx), outer_method);
2327
2328 // Refresh `raw_this_object` which may have changed after resolution.
2329 raw_this_object = this_object.Get();
2330 }
2331
2332 // The compiler and interpreter make sure the conflict trampoline is never
2333 // called on a method that resolves to j.l.Object.
2334 DCHECK(!interface_method->GetDeclaringClass()->IsObjectClass());
2335 DCHECK(interface_method->GetDeclaringClass()->IsInterface());
2336 DCHECK(!interface_method->IsRuntimeMethod());
2337 DCHECK(!interface_method->IsCopied());
2338
2339 ObjPtr<mirror::Object> obj_this = raw_this_object;
2340 ObjPtr<mirror::Class> cls = obj_this->GetClass();
2341 uint32_t imt_index = interface_method->GetImtIndex();
2342 ImTable* imt = cls->GetImt(kRuntimePointerSize);
2343 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
2344 DCHECK(conflict_method->IsRuntimeMethod());
2345
2346 if (UNLIKELY(resolve_method)) {
2347 // Now that we know the interface method, look it up in the conflict table.
2348 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2349 DCHECK(current_table != nullptr);
2350 ArtMethod* method = current_table->Lookup(interface_method, kRuntimePointerSize);
2351 if (method != nullptr) {
2352 return GetTwoWordSuccessValue(
2353 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2354 reinterpret_cast<uintptr_t>(method));
2355 }
2356 // Interface method is not in the conflict table. Continue looking up in the
2357 // iftable.
2358 }
2359
2360 ArtMethod* method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2361 if (UNLIKELY(method == nullptr)) {
2362 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2363 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2364 interface_method, obj_this.Ptr(), caller_method);
2365 return GetTwoWordFailureValue();
2366 }
2367
2368 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2369 // We create a new table with the new pair { interface_method, method }.
2370
2371 // Classes in the boot image should never need to update conflict methods in
2372 // their IMT.
2373 CHECK(!runtime->GetHeap()->ObjectIsInBootImageSpace(cls.Ptr())) << cls->PrettyClass();
2374 ArtMethod* new_conflict_method = runtime->GetClassLinker()->AddMethodToConflictTable(
2375 cls.Ptr(),
2376 conflict_method,
2377 interface_method,
2378 method);
2379 if (new_conflict_method != conflict_method) {
2380 // Update the IMT if we create a new conflict method. No fence needed here, as the
2381 // data is consistent.
2382 imt->Set(imt_index,
2383 new_conflict_method,
2384 kRuntimePointerSize);
2385 }
2386
2387 const void* code = method->GetEntryPointFromQuickCompiledCode();
2388
2389 // When we return, the caller will branch to this address, so it had better not be 0!
2390 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2391 << " location: " << method->GetDexFile()->GetLocation();
2392
2393 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2394 reinterpret_cast<uintptr_t>(method));
2395 }
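
// Illustrative sketch of the conflict-table idea used above; ImtConflictTable is the real data
// structure, and this simplified, hypothetical table only shows the { interface method,
// implementation } pairing that is consulted before falling back to the iftable lookup.
struct ExampleConflictEntry {
  ArtMethod* interface_method;
  ArtMethod* implementation;
};
[[maybe_unused]] static ArtMethod* ExampleConflictLookup(const ExampleConflictEntry* entries,
                                                         size_t count,
                                                         ArtMethod* interface_method) {
  for (size_t i = 0; i != count; ++i) {
    if (entries[i].interface_method == interface_method) {
      return entries[i].implementation;  // Cached resolution; no iftable walk needed.
    }
  }
  return nullptr;  // Not cached yet; resolve via the iftable and add a new pair.
}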
2396
2397 // Returns uint64_t representing raw bits from JValue.
2398 extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp)
2399 REQUIRES_SHARED(Locks::mutator_lock_) {
2400 ScopedQuickEntrypointChecks sqec(self);
2401 DCHECK(raw_receiver != nullptr);
2402 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2403
2404 // Start new JNI local reference state
2405 JNIEnvExt* env = self->GetJniEnv();
2406 ScopedObjectAccessUnchecked soa(env);
2407 ScopedJniEnvLocalRefState env_state(env);
2408 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2409
2410 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2411 uint32_t dex_pc;
2412 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2413 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2414 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2415 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2416 const dex::ProtoIndex proto_idx(inst.VRegH());
2417 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2418 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
2419 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, &soa);
2420 gc_visitor.VisitArguments();
2421
2422 // Wrap raw_receiver in a Handle for safety.
2423 StackHandleScope<3> hs(self);
2424 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
2425 raw_receiver = nullptr;
2426 self->EndAssertNoThreadSuspension(old_cause);
2427
2428 // Resolve method.
2429 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2430 ArtMethod* resolved_method = linker->ResolveMethodWithChecks(
2431 inst.VRegB(), caller_method, kVirtual);
2432
2433 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
2434 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
2435
2436 // Fix references before constructing the shadow frame.
2437 gc_visitor.FixupReferences();
2438
2439 // Construct shadow frame placing arguments consecutively from |first_arg|.
2440 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2441 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2442 const size_t first_arg = 0;
2443 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2444 CREATE_SHADOW_FRAME(num_vregs, resolved_method, dex_pc);
2445 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2446 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2447 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2448 kMethodIsStatic,
2449 shorty,
2450 shadow_frame,
2451 first_arg);
2452 shadow_frame_builder.VisitArguments();
2453
2454 // Push a transition back into managed code onto the linked list in thread.
2455 ManagedStack fragment;
2456 self->PushManagedStackFragment(&fragment);
2457
2458   // Call DoInvokePolymorphic with |is_range| = true, as the shadow frame has argument registers in
2459 // consecutive order.
2460 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2461 Intrinsics intrinsic = resolved_method->GetIntrinsic();
2462 JValue result;
2463 bool success = false;
2464 if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) {
2465 Handle<mirror::MethodType> method_type(
2466 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2467 if (UNLIKELY(method_type.IsNull())) {
2468 // This implies we couldn't resolve one or more types in this method handle.
2469 CHECK(self->IsExceptionPending());
2470 return 0UL;
2471 }
2472
2473 Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
2474 ObjPtr<mirror::MethodHandle>::DownCast(receiver_handle.Get())));
2475 if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
2476 success = MethodHandleInvokeExact(self,
2477 *shadow_frame,
2478 method_handle,
2479 method_type,
2480 &operands,
2481 &result);
2482 } else {
2483 DCHECK_EQ(static_cast<uint32_t>(intrinsic),
2484 static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
2485 success = MethodHandleInvoke(self,
2486 *shadow_frame,
2487 method_handle,
2488 method_type,
2489 &operands,
2490 &result);
2491 }
2492 } else {
2493 DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass());
2494 Handle<mirror::VarHandle> var_handle(hs.NewHandle(
2495 ObjPtr<mirror::VarHandle>::DownCast(receiver_handle.Get())));
2496 mirror::VarHandle::AccessMode access_mode =
2497 mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
2498
2499 success = VarHandleInvokeAccessor(self,
2500 *shadow_frame,
2501 var_handle,
2502 caller_method,
2503 proto_idx,
2504 access_mode,
2505 &operands,
2506 &result);
2507 }
2508
2509 DCHECK(success || self->IsExceptionPending());
2510
2511 // Pop transition record.
2512 self->PopManagedStackFragment(fragment);
2513
2514 bool is_ref = (shorty[0] == 'L');
2515 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2516 self, DeoptimizationMethodType::kDefault, is_ref, result);
2517
2518 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2519 }
2520
2521 extern "C" uint64_t artInvokePolymorphicWithHiddenReceiver(mirror::Object* raw_receiver,
2522 Thread* self,
2523 ArtMethod** sp)
2524 REQUIRES_SHARED(Locks::mutator_lock_) {
2525 ScopedQuickEntrypointChecks sqec(self);
2526 DCHECK(raw_receiver != nullptr);
2527 DCHECK(raw_receiver->InstanceOf(WellKnownClasses::java_lang_invoke_MethodHandle.Get()));
2528 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2529
2530 JNIEnvExt* env = self->GetJniEnv();
2531 ScopedObjectAccessUnchecked soa(env);
2532 ScopedJniEnvLocalRefState env_state(env);
2533 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2534
2535 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2536 uint32_t dex_pc;
2537 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2538 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2539 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2540 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2541 const dex::ProtoIndex proto_idx(inst.VRegH());
2542 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2543
2544   // invokeExact is not a static method, but this entrypoint uses a custom calling convention: the
2545   // receiver (MethodHandle) object is not passed as the first argument but through different means,
2546   // hence the shorty and argument allocation look as if invokeExact were static.
2547 RememberForGcArgumentVisitor gc_visitor(sp, /* is_static= */ true, shorty, &soa);
2548 gc_visitor.VisitArguments();
2549
2550 // Wrap raw_receiver in a Handle for safety.
2551 StackHandleScope<2> hs(self);
2552 Handle<mirror::MethodHandle> method_handle(
2553 hs.NewHandle(down_cast<mirror::MethodHandle*>(raw_receiver)));
2554
2555 self->EndAssertNoThreadSuspension(old_cause);
2556
2557 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2558 ArtMethod* invoke_exact = WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact;
2559 if (kIsDebugBuild) {
2560 ArtMethod* resolved_method = linker->ResolveMethodWithChecks(
2561 inst.VRegB(), caller_method, kVirtual);
2562 CHECK_EQ(resolved_method, invoke_exact);
2563 }
2564
2565 Handle<mirror::MethodType> method_type(
2566 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2567 if (UNLIKELY(method_type.IsNull())) {
2568 // This implies we couldn't resolve one or more types in this method handle.
2569 CHECK(self->IsExceptionPending());
2570 return 0UL;
2571 }
2572
2573 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
2574
2575 // Fix references before constructing the shadow frame.
2576 gc_visitor.FixupReferences();
2577
2578 // Construct shadow frame placing arguments consecutively from |first_arg|.
2579 const bool is_range = inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE;
2580 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2581 const size_t first_arg = 0;
2582 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2583 CREATE_SHADOW_FRAME(num_vregs, invoke_exact, dex_pc);
2584 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2585 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2586 // Pretend the method is static, see the gc_visitor comment above.
2587 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2588 /* is_static= */ true,
2589 shorty,
2590 shadow_frame,
2591 first_arg);
2592   // The receiver is not passed as a regular argument, so add it to the ShadowFrame manually.
2593 shadow_frame_builder.SetReceiver(method_handle.Get());
2594 shadow_frame_builder.VisitArguments();
2595
2596 // Push a transition back into managed code onto the linked list in thread.
2597 ManagedStack fragment;
2598 self->PushManagedStackFragment(&fragment);
2599
2600 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2601 JValue result;
2602 bool success = MethodHandleInvokeExact(self,
2603 *shadow_frame,
2604 method_handle,
2605 method_type,
2606 &operands,
2607 &result);
2608
2609 DCHECK(success || self->IsExceptionPending());
2610
2611 // Pop transition record.
2612 self->PopManagedStackFragment(fragment);
2613
2614 bool is_ref = shorty[0] == 'L';
2615 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2616 self, DeoptimizationMethodType::kDefault, is_ref, result);
2617
2618 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2619 }
2620
2621 // Returns uint64_t representing raw bits from JValue.
2622 extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp)
2623 REQUIRES_SHARED(Locks::mutator_lock_) {
2624 ScopedQuickEntrypointChecks sqec(self);
2625 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2626
2627 // invoke-custom is effectively a static call (no receiver).
2628 static constexpr bool kMethodIsStatic = true;
2629
2630 // Start new JNI local reference state
2631 JNIEnvExt* env = self->GetJniEnv();
2632 ScopedObjectAccessUnchecked soa(env);
2633 ScopedJniEnvLocalRefState env_state(env);
2634
2635 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2636
2637 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2638 uint32_t dex_pc;
2639 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
2640 const DexFile* dex_file = caller_method->GetDexFile();
2641 const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
2642 std::string_view shorty = caller_method->GetDexFile()->GetShortyView(proto_idx);
2643
2644 // Construct the shadow frame placing arguments consecutively from |first_arg|.
2645 const size_t first_arg = 0;
2646 const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
2647 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2648 CREATE_SHADOW_FRAME(num_vregs, caller_method, dex_pc);
2649 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2650 ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
2651 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2652 kMethodIsStatic,
2653 shorty,
2654 shadow_frame,
2655 first_arg);
2656 shadow_frame_builder.VisitArguments();
2657
2658 // Push a transition back into managed code onto the linked list in thread.
2659 ManagedStack fragment;
2660 self->PushManagedStackFragment(&fragment);
2661 self->EndAssertNoThreadSuspension(old_cause);
2662
2663 // Perform the invoke-custom operation.
2664 RangeInstructionOperands operands(first_arg, num_vregs);
2665 JValue result;
2666 bool success =
2667 interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result);
2668 DCHECK(success || self->IsExceptionPending());
2669
2670 // Pop transition record.
2671 self->PopManagedStackFragment(fragment);
2672
2673 bool is_ref = (shorty[0] == 'L');
2674 Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
2675 self, DeoptimizationMethodType::kDefault, is_ref, result);
2676
2677 return NanBoxResultIfNeeded(result.GetJ(), shorty[0]);
2678 }
2679
2680 extern "C" void artJniMethodEntryHook(Thread* self)
2681 REQUIRES_SHARED(Locks::mutator_lock_) {
2682 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2683 ArtMethod* method = *self->GetManagedStack()->GetTopQuickFrame();
2684 instr->MethodEnterEvent(self, method);
2685 }
2686
2687 extern "C" Context* artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
2688 REQUIRES_SHARED(Locks::mutator_lock_) {
2689 ScopedQuickEntrypointChecks sqec(self);
2690 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
2691 if (instr->HasFastMethodEntryListenersOnly()) {
2692 instr->MethodEnterEvent(self, method);
2693 // No exception or deoptimization.
2694 return nullptr;
2695 }
2696
2697 if (instr->HasMethodEntryListeners()) {
2698 instr->MethodEnterEvent(self, method);
2699     // The MethodEnter callback could have requested a deopt, e.g. by setting a breakpoint, so
2700 // check if we need a deopt here.
2701 if (instr->ShouldDeoptimizeCaller(self, sp) || instr->IsDeoptimized(method)) {
2702       // Instrumentation can request deoptimizing only a particular method (e.g. when
2703       // there are breakpoints on the method). In such cases, deoptimize only this method.
2704       // Full-frame deoptimizations are handled on method exit.
2705 return artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
2706 }
2707 } else {
2708 DCHECK(!instr->IsDeoptimized(method));
2709 }
2710 // No exception or deoptimization.
2711 return nullptr;
2712 }
2713
artMethodExitHook(Thread * self,ArtMethod ** sp,uint64_t * gpr_result,uint64_t * fpr_result,uint32_t frame_size)2714 extern "C" Context* artMethodExitHook(Thread* self,
2715 ArtMethod** sp,
2716 uint64_t* gpr_result,
2717 uint64_t* fpr_result,
2718 uint32_t frame_size)
2719 REQUIRES_SHARED(Locks::mutator_lock_) {
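  // |gpr_result| and |fpr_result| point to the locations where the exit-hook stub saved the
  // integer and floating-point return registers, and |frame_size| is the size of the frame
  // being exited; it is used when checking whether the caller needs to be deoptimized.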
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
  // The instrumentation exit stub must not be entered with a pending exception.
  CHECK(!self->IsExceptionPending())
      << "Entered the instrumentation exit stub with a pending exception "
      << self->GetException()->Dump();

  instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
  DCHECK(instr->RunExitHooks());

  ArtMethod* method = *sp;
  if (instr->HasFastMethodExitListenersOnly()) {
    // Fast method exit listeners are only used for tracing, which needs neither deoptimization
    // checks nor a return value.
    JValue return_value;
    instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);
    // No exception or deoptimization.
    return nullptr;
  }

  bool is_ref = false;
  if (instr->HasMethodExitListeners()) {
    StackHandleScope<1> hs(self);

    CHECK(gpr_result != nullptr);
    CHECK(fpr_result != nullptr);

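    // GetReturnValue() reconstructs the return value from the saved register contents using the
    // method's return type, and sets |is_ref| when that type is a reference.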
    JValue return_value = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
    MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
    if (is_ref) {
      // Take a handle to the return value so we won't lose it if we suspend.
      res.Assign(return_value.GetL());
    }
    DCHECK(!method->IsRuntimeMethod());

    // If we need a deoptimization, MethodExitEvent will be called by the interpreter when it
    // re-executes the return instruction. For native methods we have to process method exit
    // events here, since deoptimization just removes the native frame.
    instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);

    if (is_ref) {
      // Restore the return value if it's a reference, since it might have moved.
      *reinterpret_cast<mirror::Object**>(gpr_result) = res.Get();
      return_value.SetL(res.Get());
    }
  }

  if (self->IsExceptionPending() || self->ObserveAsyncException()) {
    // The exception was thrown from the method exit callback, so we must not run the method
    // unwind callbacks for this frame.
    std::unique_ptr<Context> context =
        self->QuickDeliverException(/* is_method_exit_exception= */ true);
    DCHECK(context != nullptr);
    return context.release();
  }

  // We should deoptimize here if the caller requires a deoptimization or if the current method
  // needs one, for example because the method exit hooks requested this frame to be popped;
  // IsForcedInterpreterNeededForUpcall checks for that.
  const bool deoptimize = instr->ShouldDeoptimizeCaller(self, sp, frame_size) ||
                          Dbg::IsForcedInterpreterNeededForUpcall(self, method);
  if (deoptimize) {
    JValue ret_val = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
    DeoptimizationMethodType deopt_method_type = instr->GetDeoptimizationMethodType(method);
    self->PushDeoptimizationContext(
        ret_val, is_ref, self->GetException(), false, deopt_method_type);
    // The method exit callback has already run for this method, so tell the deoptimizer to skip
    // exit callbacks for this frame.
    std::unique_ptr<Context> context = self->Deoptimize(DeoptimizationKind::kFullFrame,
                                                        /* single_frame= */ false,
                                                        /* skip_method_exit_callbacks= */ true);
    DCHECK(context != nullptr);
    return context.release();
  }

  // No exception or deoptimization.
  return nullptr;
}

}  // namespace art