/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_

#include "base/macros.h"
#include "base/pointer_size.h"
#include "class_root.h"
#include "code_generator.h"
#include "common_arm.h"
#include "dex/string_reference.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm/assembler_arm_vixl.h"

// TODO(VIXL): Make VIXL compile cleanly with -Wshadow, -Wdeprecated-declarations.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop

namespace art HIDDEN {

namespace linker {
class Thumb2RelativePatcherTest;
}  // namespace linker

namespace arm {

// This constant is used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes =
    15 * vixl::aarch32::kMaxInstructionSizeInBytes;

static const vixl::aarch32::Register kParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3,
    vixl::aarch32::s4,
    vixl::aarch32::s5,
    vixl::aarch32::s6,
    vixl::aarch32::s7,
    vixl::aarch32::s8,
    vixl::aarch32::s9,
    vixl::aarch32::s10,
    vixl::aarch32::s11,
    vixl::aarch32::s12,
    vixl::aarch32::s13,
    vixl::aarch32::s14,
    vixl::aarch32::s15
};
static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegistersVIXL);

static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;

// Callee-save core registers: r5, r6, r7, r8 (excluded when the marking register
// is reserved for Baker read barriers, since r8 then serves as the Marking
// Register), r10, r11, and lr.
static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList::Union(
    vixl::aarch32::RegisterList(vixl::aarch32::r5,
                                vixl::aarch32::r6,
                                vixl::aarch32::r7),
    // Do not consider r8 as a callee-save register with Baker read barriers.
    (kReserveMarkingRegister
         ? vixl::aarch32::RegisterList()
         : vixl::aarch32::RegisterList(vixl::aarch32::r8)),
    vixl::aarch32::RegisterList(vixl::aarch32::r10,
                                vixl::aarch32::r11,
                                vixl::aarch32::lr));

// Callee saves FP registers s16 to s31 inclusive.
static const vixl::aarch32::SRegisterList kFpuCalleeSaves =
    vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16);

static const vixl::aarch32::Register kRuntimeParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r0,
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kRuntimeParameterCoreRegistersLengthVIXL =
    arraysize(kRuntimeParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3
};
static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
    arraysize(kRuntimeParameterFpuRegistersVIXL);

class LoadClassSlowPathARMVIXL;
class CodeGeneratorARMVIXL;

using VIXLInt32Literal = vixl::aarch32::Literal<int32_t>;
using VIXLUInt32Literal = vixl::aarch32::Literal<uint32_t>;

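// This X-macro list is meant to be expanded with a caller-supplied macro V(Name),
// in the same way the FOR_EACH_CONCRETE_INSTRUCTION_* lists are expanded below.
// A hypothetical illustration (the consumer macro name is made up):
//
//   #define MARK_UNIMPLEMENTED(Name) /* handle the unimplemented "Name" intrinsic */
//   UNIMPLEMENTED_INTRINSIC_LIST_ARM(MARK_UNIMPLEMENTED)
//   #undef MARK_UNIMPLEMENTED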
#define UNIMPLEMENTED_INTRINSIC_LIST_ARM(V)                                \
  V(MathSignumFloat)                                                       \
  V(MathSignumDouble)                                                      \
  V(MathCopySignFloat)                                                     \
  V(MathCopySignDouble)                                                    \
  V(MathRoundDouble) /* Could be done by changing rounding mode, maybe? */ \
  V(UnsafeCASLong)   /* High register pressure */                          \
  V(SystemArrayCopyChar)                                                   \
  V(LongDivideUnsigned)                                                    \
  V(IntegerRemainderUnsigned)                                              \
  V(LongRemainderUnsigned)                                                 \
  V(CRC32Update)                                                           \
  V(CRC32UpdateBytes)                                                      \
  V(CRC32UpdateByteBuffer)                                                 \
  V(FP16ToFloat)                                                           \
  V(FP16ToHalf)                                                            \
  V(FP16Floor)                                                             \
  V(FP16Ceil)                                                              \
  V(FP16Rint)                                                              \
  V(FP16Greater)                                                           \
  V(FP16GreaterEquals)                                                     \
  V(FP16Less)                                                              \
  V(FP16LessEquals)                                                        \
  V(FP16Compare)                                                           \
  V(FP16Min)                                                               \
  V(FP16Max)                                                               \
  V(MathMultiplyHigh)                                                      \
  V(StringStringIndexOf)                                                   \
  V(StringStringIndexOfAfter)                                              \
  V(StringBufferAppend)                                                    \
  V(StringBufferLength)                                                    \
  V(StringBufferToString)                                                  \
  V(StringBuilderAppendObject)                                             \
  V(StringBuilderAppendString)                                             \
  V(StringBuilderAppendCharSequence)                                       \
  V(StringBuilderAppendCharArray)                                          \
  V(StringBuilderAppendBoolean)                                            \
  V(StringBuilderAppendChar)                                               \
  V(StringBuilderAppendInt)                                                \
  V(StringBuilderAppendLong)                                               \
  V(StringBuilderAppendFloat)                                              \
  V(StringBuilderAppendDouble)                                             \
  V(StringBuilderLength)                                                   \
  V(StringBuilderToString)                                                 \
  V(SystemArrayCopyByte)                                                   \
  V(SystemArrayCopyInt)                                                    \
  V(UnsafeArrayBaseOffset)                                                 \
  /* 1.8 */                                                                \
  V(MathFmaDouble)                                                         \
  V(MathFmaFloat)                                                          \
  V(MethodHandleInvokeExact)                                               \
  V(MethodHandleInvoke)                                                    \
  /* OpenJDK 11 */                                                         \
  V(JdkUnsafeArrayBaseOffset)                                              \
  V(JdkUnsafeCASLong) /* High register pressure */                         \
  V(JdkUnsafeCompareAndSetLong)

ALWAYS_INLINE inline StoreOperandType GetStoreOperandType(DataType::Type type) {
  switch (type) {
    case DataType::Type::kReference:
      return kStoreWord;
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
      return kStoreByte;
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
      return kStoreHalfword;
    case DataType::Type::kInt32:
      return kStoreWord;
    case DataType::Type::kInt64:
      return kStoreWordPair;
    case DataType::Type::kFloat32:
      return kStoreSWord;
    case DataType::Type::kFloat64:
      return kStoreDWord;
    default:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
}

class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
      : switch_instr_(switch_instr),
        table_start_(),
        bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
    uint32_t num_entries = switch_instr_->GetNumEntries();
    for (uint32_t i = 0; i < num_entries; i++) {
      VIXLInt32Literal* lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
      bb_addresses_.emplace_back(lit);
    }
  }

  vixl::aarch32::Label* GetTableStartLabel() { return &table_start_; }

  void EmitTable(CodeGeneratorARMVIXL* codegen);
  void FixTable(CodeGeneratorARMVIXL* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch32::Label table_start_;
  ArenaVector<std::unique_ptr<VIXLInt32Literal>> bb_addresses_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARMVIXL);
};

class InvokeRuntimeCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeRuntimeCallingConventionARMVIXL()
      : CallingConvention(kRuntimeParameterCoreRegistersVIXL,
                          kRuntimeParameterCoreRegistersLengthVIXL,
                          kRuntimeParameterFpuRegistersVIXL,
                          kRuntimeParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConventionARMVIXL);
};

class InvokeDexCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeDexCallingConventionARMVIXL()
      : CallingConvention(kParameterCoreRegistersVIXL,
                          kParameterCoreRegistersLengthVIXL,
                          kParameterFpuRegistersVIXL,
                          kParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
};

class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARMVIXL() {}
  virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConventionARMVIXL calling_convention;
  uint32_t double_index_ = 0;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARMVIXL);
};

class CriticalNativeCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorARMVIXL(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorARMVIXL() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // Register allocator does not support adjusting frame size, so we cannot provide final locations
  // of stack arguments for register allocation. We ask the register allocator for any location and
  // move these arguments to the right place after adjusting the SP when generating the call.
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorARMVIXL);
};

class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARMVIXL() {}

  Location GetObjectLocation() const override {
    return helpers::LocationFrom(vixl::aarch32::r1);
  }
  Location GetFieldIndexLocation() const override {
    return helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetReturnLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
        : helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
        : (is_instance
            ? helpers::LocationFrom(vixl::aarch32::r2)
            : helpers::LocationFrom(vixl::aarch32::r1));
  }
  Location GetFpuLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
        : helpers::LocationFrom(vixl::aarch32::s0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL);
};

class SlowPathCodeARMVIXL : public SlowPathCode {
 public:
  explicit SlowPathCodeARMVIXL(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;

 private:
  vixl::aarch32::Label entry_label_;
  vixl::aarch32::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL);
};

class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  ArmVIXLAssembler* GetAssembler() const;

 private:
  void Exchange(vixl32::Register reg, int mem);
  void Exchange(int mem1, int mem2);

  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
};

class LocationsBuilderARMVIXL : public HGraphVisitor {
 public:
  LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleRotate(HBinaryOperation* rotate);

  Location ArithmeticZeroOrFpuRegister(HInstruction* input);
  Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
  bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);

  CodeGeneratorARMVIXL* const codegen_;
  InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
};

class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  ArmVIXLAssembler* GetAssembler() const { return assembler_; }
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path,
                                        vixl32::Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
                                         vixl::aarch32::Register temp,
                                         vixl::aarch32::FlagsUpdate flags_update);
  void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateAddLongConst(Location out, Location first, uint64_t value);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(HBinaryOperation* rotate);
  void HandleLongRotate(HBinaryOperation* rotate);
  void HandleRotate(HBinaryOperation* rotate);
  void HandleShift(HBinaryOperation* operation);

  void GenerateWideAtomicStore(vixl::aarch32::Register addr,
                               uint32_t offset,
                               vixl::aarch32::Register value_lo,
                               vixl::aarch32::Register value_hi,
                               vixl::aarch32::Register temp1,
                               vixl::aarch32::Register temp2,
                               HInstruction* instruction);
  void GenerateWideAtomicLoad(vixl::aarch32::Register addr,
                              uint32_t offset,
                              vixl::aarch32::Register out_lo,
                              vixl::aarch32::Register out_hi);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
  void GenerateMinMaxLong(LocationSummary* locations, bool is_min);
  void GenerateMinMaxFloat(HInstruction* minmax, bool is_min);
  void GenerateMinMaxDouble(HInstruction* minmax, bool is_min);
  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch32::Label* true_target,
                             vixl::aarch32::Label* false_target,
                             bool far_target = true);
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    vixl::aarch32::Label* true_target,
                                    vixl::aarch32::Label* false_target,
                                    bool is_far_target = true);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenerateMethodEntryExitHook(HInstruction* instruction);

  vixl::aarch32::MemOperand VecAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);
  vixl::aarch32::AlignedMemOperand VecAddressUnaligned(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);

  ArmVIXLAssembler* const assembler_;
  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL);
};

class CodeGeneratorARMVIXL : public CodeGenerator {
 public:
  CodeGeneratorARMVIXL(HGraph* graph,
                       const CompilerOptions& compiler_options,
                       OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARMVIXL() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;
  void Bind(HBasicBlock* block) override;
  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  size_t GetWordSize() const override {
    return static_cast<size_t>(kArmPointerSize);
  }

  size_t GetCalleePreservedFPWidth() const override {
    return vixl::aarch32::kSRegSizeInBytes;
  }

  size_t GetSIMDRegisterWidth() const override {
    // ARM 32-bit backend doesn't support Q registers in vectorizer, only D
    // registers (due to register allocator restrictions: overlapping s/d/q
    // registers).
    return vixl::aarch32::kDRegSizeInBytes;
  }

  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }

  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }

  ArmVIXLAssembler* GetAssembler() override { return &assembler_; }

  const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }

  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  void FixJumpTables();
  void SetupBlockedRegisters() const override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
  InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }

  const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;

  // Helper method to move a 32-bit value between two locations.
  void Move32(Location destination, Location source);

  void LoadFromShiftedRegOffset(DataType::Type type,
                                Location out_loc,
                                vixl::aarch32::Register base,
                                vixl::aarch32::Register reg_index,
                                vixl::aarch32::Condition cond = vixl::aarch32::al);
  void StoreToShiftedRegOffset(DataType::Type type,
                               Location out_loc,
                               vixl::aarch32::Register base,
                               vixl::aarch32::Register reg_index,
                               vixl::aarch32::Condition cond = vixl::aarch32::al);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  // Emit a write barrier if:
  // A) emit_null_check is false, or
  // B) emit_null_check is true and value is not null.
  void MaybeMarkGCCard(vixl::aarch32::Register temp,
                       vixl::aarch32::Register card,
                       vixl::aarch32::Register object,
                       vixl::aarch32::Register value,
                       bool emit_null_check);

  // Emit a write barrier unconditionally.
  void MarkGCCard(vixl::aarch32::Register temp,
                  vixl::aarch32::Register card,
                  vixl::aarch32::Register object);

  // Crash if the card table is not valid. This check is only emitted for the CC GC. We assert
  // `(!clean || !self->is_gc_marking)`, since the card table should not be set to clean when the CC
  // GC is marking for eliminated write barriers.
  void CheckGCCardIsValid(vixl::aarch32::Register temp,
                          vixl::aarch32::Register card,
                          vixl::aarch32::Register object);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);

  void Initialize() override {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  void Finalize() override;

  bool NeedsTwoRegisters(DataType::Type type) const override {
    return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
  }

  void ComputeSpillMask() override;

  vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.img.rel.ro, .bss, or directly in the boot image.
  //
  // The PC-relative address is loaded with three instructions,
  // MOVW+MOVT to load the offset to base_reg and then ADD base_reg, PC. The offset
  // is calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
  // currently emit these 3 instructions together, instruction scheduling could
  // split this sequence apart, so we keep separate labels for each of them.
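  //
  // An illustrative sketch of the emitted sequence (the destination register and
  // the patched immediates are placeholders, not the exact encodings):
  //
  //   movw r0, #<low 16 bits of offset>    @ bound to movw_label
  //   movt r0, #<high 16 bits of offset>   @ bound to movt_label
  //   add  r0, pc                          @ bound to add_pc_label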
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }

    // Target dex file or null for boot image .data.img.rel.ro patches.
    const DexFile* target_dex_file;
    // Either the boot image offset (to write to .data.img.rel.ro) or string/type/method index.
    uint32_t offset_or_index;
    vixl::aarch32::Label movw_label;
    vixl::aarch32::Label movt_label;
    vixl::aarch32::Label add_pc_label;
  };

  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data);
  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset);
  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method);
  PcRelativePatchInfo* NewAppImageMethodPatch(MethodReference target_method);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewAppImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewTypeBssEntryPatch(HLoadClass* load_class);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index);

  // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
  void EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset);

  // Emit the BNE instruction for baker read barrier and record
  // the associated patch for AOT or slow path for JIT.
  void EmitBakerReadBarrierBne(uint32_t custom_data);

  VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
  VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                 dex::StringIndex string_index,
                                                 Handle<mirror::String> handle);
  VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                dex::TypeIndex type_index,
                                                Handle<mirror::Class> handle);

  void LoadBootImageRelRoEntry(vixl::aarch32::Register reg, uint32_t boot_image_offset);
  void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
  void LoadTypeForBootImageIntrinsic(vixl::aarch32::Register reg, TypeReference type_reference);
  void LoadIntrinsicDeclaringClass(vixl::aarch32::Register reg, HInvoke* invoke);
  void LoadClassRootForIntrinsic(vixl::aarch32::Register reg, ClassRoot class_root);

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
  void EmitThunkCode(const linker::LinkerPatch& patch,
                     /*out*/ ArenaVector<uint8_t>* code,
                     /*out*/ std::string* debug_name) override;

  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               vixl::aarch32::Register obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option);
  // Generate MOV for an intrinsic to mark the old value with Baker read barrier.
  void GenerateIntrinsicMoveWithBakerReadBarrier(vixl::aarch32::Register marked_old_value,
                                                 vixl::aarch32::Register old_value);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             const vixl::aarch32::MemOperand& src,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t offset,
                                             Location maybe_temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Emit code checking the status of the Marking Register, and
  // aborting the program if MR does not match the value stored in the
  // art::Thread object. Code is only emitted in debug mode and if
  // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
  //
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck in the code generator, and is
  // used together with kMarkingRegisterCheckBreakCodeBaseCode to
  // create the value passed to the BKPT instruction. Note that unlike
  // in the ARM64 code generator, where `__LINE__` is passed as `code`
  // argument to
  // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck, we cannot
  // realistically do that here, as Encoding T1 for the BKPT
  // instruction only accepts 8-bit immediate values.
  //
  // If `temp_loc` is a valid location, it is expected to be a
  // register and will be used as a temporary to generate code;
  // otherwise, a temporary will be fetched from the core register
  // scratch pool.
  virtual void MaybeGenerateMarkingRegisterCheck(int code,
                                                 Location temp_loc = Location::NoLocation());

  // Create slow path for a read barrier for a heap reference within `instruction`.
  //
  // This is a helper function for GenerateReadBarrierSlow() that has the same
  // arguments. The creation and adding of the slow path is exposed for intrinsics
  // that cannot use GenerateReadBarrierSlow() from their own slow paths.
  SlowPathCodeARMVIXL* AddReadBarrierSlowPath(HInstruction* instruction,
                                              Location out,
                                              Location ref,
                                              Location obj,
                                              uint32_t offset,
                                              Location index);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;

  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
    return jump_tables_.back().get();
  }
  void EmitJumpTables();

  void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
                               vixl::aarch32::Register out);

  // `temp` is an extra temporary register that is used for some conditions;
  // callers may omit it, in which case the method will use a scratch
  // register instead.
  void GenerateConditionWithZero(IfCondition condition,
                                 vixl::aarch32::Register out,
                                 vixl::aarch32::Register in,
                                 vixl::aarch32::Register temp = vixl32::Register());

  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
    // The function must only be called within special scopes
    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
    // veneer/literal pools by the VIXL assembler.
    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
  }
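
  // A sketch of typical usage (illustrative only; the loaded operand and the
  // instruction are placeholders):
  //
  //   {
  //     EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
  //     __ Ldr(out, MemOperand(base, offset));
  //     MaybeRecordImplicitNullCheck(instruction);
  //   }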

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass);
  void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);

 private:
  // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.

  enum class BakerReadBarrierKind : uint8_t {
    kField,         // Field get or array get with constant offset (i.e. constant index).
    kArray,         // Array get with index in register.
    kGcRoot,        // GC root load.
    kIntrinsicCas,  // Unsafe/VarHandle CAS intrinsic.
    kLast = kIntrinsicCas
  };

  enum class BakerReadBarrierWidth : uint8_t {
    kWide,          // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
    kNarrow,        // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
    kLast = kNarrow
  };

  static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* pc is invalid */ 15u;

  static constexpr size_t kBitsForBakerReadBarrierKind =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
  static constexpr size_t kBakerReadBarrierBitsForRegister =
      MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
  using BakerReadBarrierKindField =
      BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
  using BakerReadBarrierFirstRegField =
      BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
  using BakerReadBarrierSecondRegField =
      BitField<uint32_t,
               kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
               kBakerReadBarrierBitsForRegister>;
  static constexpr size_t kBitsForBakerReadBarrierWidth =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
  using BakerReadBarrierWidthField =
      BitField<BakerReadBarrierWidth,
               kBitsForBakerReadBarrierKind + 2 * kBakerReadBarrierBitsForRegister,
               kBitsForBakerReadBarrierWidth>;
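
  // An illustrative layout of the encoded data, from least to most significant
  // bits (field positions and widths are given by the BitField definitions above):
  //
  //   [ kind | first register | second register | width ]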

  static void CheckValidReg(uint32_t reg) {
    DCHECK(reg < vixl::aarch32::ip.GetCode() && reg != mr.GetCode()) << reg;
  }

  static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
                                                  uint32_t holder_reg,
                                                  bool narrow) {
    CheckValidReg(base_reg);
    CheckValidReg(holder_reg);
    DCHECK_IMPLIES(narrow, base_reg < 8u) << base_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(holder_reg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
    CheckValidReg(base_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
  }

  static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
    CheckValidReg(root_reg);
    DCHECK_IMPLIES(narrow, root_reg < 8u) << root_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  static uint32_t EncodeBakerReadBarrierIntrinsicCasData(uint32_t root_reg) {
    CheckValidReg(root_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kIntrinsicCas) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
  }

  void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
                                    uint32_t encoded_data,
                                    /*out*/ std::string* debug_name);

  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, VIXLUInt32Literal*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          VIXLUInt32Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        VIXLUInt32Literal*,
                                        TypeReferenceValueComparator>;

  struct BakerReadBarrierPatchInfo {
    explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }

    vixl::aarch32::Label label;
    uint32_t custom_data;
  };

  VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);
  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                          ArenaVector<linker::LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
  ArenaDeque<vixl::aarch32::Label> block_labels_;  // Indexed by block id.
  vixl::aarch32::Label frame_entry_label_;

  ArenaVector<std::unique_ptr<JumpTableARMVIXL>> jump_tables_;
  LocationsBuilderARMVIXL location_builder_;
  InstructionCodeGeneratorARMVIXL instruction_visitor_;
  ParallelMoveResolverARMVIXL move_resolver_;

  ArmVIXLAssembler assembler_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative public type patch info for kBssEntryPublic.
  ArenaDeque<PcRelativePatchInfo> public_type_bss_entry_patches_;
  // PC-relative package type patch info for kBssEntryPackage.
  ArenaDeque<PcRelativePatchInfo> package_type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative patch info for IntrinsicObjects when compiling the boot image,
  // and for kBootImageRelRo method/type/string patches otherwise.
  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
  // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
  ArenaDeque<PatchInfo<vixl::aarch32::Label>> call_entrypoint_patches_;
  // Baker read barrier patch info.
  ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;

  // Deduplication map for 32-bit literals, used for JIT for boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Patches for string literals in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class literals in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
  // Wrap the label to work around vixl::aarch32::Label being non-copyable
  // and non-moveable and as such unusable in ArenaSafeMap<>.
  struct LabelWrapper {
    LabelWrapper(const LabelWrapper& src)
        : label() {
      DCHECK(!src.label.IsReferenced() && !src.label.IsBound());
    }
    LabelWrapper() = default;
    vixl::aarch32::Label label;
  };
  ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;

  friend class linker::Thumb2RelativePatcherTest;
  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_