/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_

#include "base/macros.h"
#include "code_generator.h"
#include "instruction_simplifier_shared.h"
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile cleanly with -Wshadow, -Wdeprecated-declarations.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop

namespace art HIDDEN {

using helpers::CanFitInShifterOperand;
using helpers::HasShifterOperand;

namespace arm64 {
namespace helpers {

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

inline int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::aarch64::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::aarch64::kZeroRegCode;
  }
  return code;
}

inline int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::aarch64::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::aarch64::kZeroRegCode) {
    return XZR;
  }
  return code;
}

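// For example, ART's SP (code 31) maps to VIXL's kSPRegInternalCode, and
// ART's XZR (code 32) maps to VIXL's kZeroRegCode; all other core register
// codes are identity-mapped, so the two helpers above round-trip:
//   ARTRegCodeFromVIXL(VIXLRegCodeFromART(SP)) == SP
//   ARTRegCodeFromVIXL(VIXLRegCodeFromART(XZR)) == XZR
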
inline vixl::aarch64::Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::XRegister(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::WRegister(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register RegisterFrom(Location location, DataType::Type type) {
  DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kInt64 ? XRegisterFrom(location) : WRegisterFrom(location);
}

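// For example, for a core-register Location `loc` holding ART code 0,
// RegisterFrom(loc, DataType::Type::kInt64) yields x0, while any other
// non-floating-point type (e.g. kInt32 or kReference) yields w0.
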
inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::VRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::DRegister(location.reg());
}

inline vixl::aarch64::VRegister QRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::QRegister(location.reg());
}

inline vixl::aarch64::VRegister VRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::VRegister(location.reg());
}

inline vixl::aarch64::ZRegister ZRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::ZRegister(location.reg());
}

inline vixl::aarch64::VRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::SRegister(location.reg());
}

inline vixl::aarch64::VRegister HRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::HRegister(location.reg());
}

inline vixl::aarch64::VRegister FPRegisterFrom(Location location, DataType::Type type) {
  DCHECK(DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
}

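// For example, for an FPU Location `loc` with code 0, kFloat64 selects d0 and
// kFloat32 selects s0. The H/S/D/Q/Z variants above are all views of the same
// underlying SIMD&FP register, so no code translation is needed for them.
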
inline vixl::aarch64::VRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::VRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, DataType::Type type) {
  return DataType::IsFloatingPointType(type)
      ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
      : vixl::aarch64::CPURegister(RegisterFrom(location, type));
}

inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
  return DataType::IsFloatingPointType(instr->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
      : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}

inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return DataType::IsFloatingPointType(instr->InputAt(index)->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}

inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
                                                              int index) {
  HInstruction* input = instr->InputAt(index);
  DataType::Type input_type = input->GetType();
  if (IsZeroBitPattern(input)) {
    return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes)
        ? vixl::aarch64::Register(vixl::aarch64::xzr)
        : vixl::aarch64::Register(vixl::aarch64::wzr);
  }
  return InputCPURegisterAt(instr, index);
}

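// For example, storing the constant 0L (a 64-bit zero bit pattern) can use
// xzr directly, and a 32-bit zero can use wzr, instead of materializing the
// constant in an allocated register.
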
inline int64_t Int64FromLocation(Location location) {
  return Int64FromConstant(location.GetConstant());
}

inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) {
  if (location.IsRegister()) {
    return vixl::aarch64::Operand(RegisterFrom(location, type));
  } else {
    return vixl::aarch64::Operand(Int64FromLocation(location));
  }
}

inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

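// For example, a register Location becomes Operand(w0) or Operand(x0)
// depending on `type`, while a constant Location becomes an immediate
// operand such as Operand(42).
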
inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
  return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::SVEMemOperand SveStackOperandFrom(Location location) {
  return vixl::aarch64::SVEMemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             size_t offset = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), offset);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             const vixl::aarch64::Register& regoffset,
                                             vixl::aarch64::Shift shift = vixl::aarch64::LSL,
                                             unsigned shift_amount = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, DataType::Type::kReference), offset);
}

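// For example, HeapOperand(w1, 8) checks that the base is a W register (heap
// references are 32-bit) and then addresses memory as [x1, #8], since AArch64
// addressing always uses the full X base register.
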
inline Location LocationFrom(const vixl::aarch64::Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}

inline Location LocationFrom(const vixl::aarch64::VRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.GetCode());
}

inline Location LocationFrom(const vixl::aarch64::ZRegister& zreg) {
  return Location::FpuRegisterLocation(zreg.GetCode());
}

inline vixl::aarch64::Operand OperandFromMemOperand(
    const vixl::aarch64::MemOperand& mem_op) {
  if (mem_op.IsImmediateOffset()) {
    return vixl::aarch64::Operand(mem_op.GetOffset());
  } else {
    DCHECK(mem_op.IsRegisterOffset());
    if (mem_op.GetExtend() != vixl::aarch64::NO_EXTEND) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetExtend(),
                                    mem_op.GetShiftAmount());
    } else if (mem_op.GetShift() != vixl::aarch64::NO_SHIFT) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetShift(),
                                    mem_op.GetShiftAmount());
    } else {
      LOG(FATAL) << "Should not reach here";
      UNREACHABLE();
    }
  }
}

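// For example, MemOperand(x0, 16) converts to the immediate Operand(16), and
// MemOperand(x0, x1, LSL, 2) converts to Operand(x1, LSL, 2). This preserves
// the offset computation so the address can be formed separately (e.g. with
// an explicit Add) when an instruction does not support the addressing mode.
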
inline bool AddSubCanEncodeAsImmediate(int64_t value) {
  // If `value` does not fit but `-value` does, VIXL will automatically use
  // the 'opposite' instruction.
  return vixl::aarch64::Assembler::IsImmAddSub(value)
      || vixl::aarch64::Assembler::IsImmAddSub(-value);
}

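// For example, AddSubCanEncodeAsImmediate(-1) is true: -1 itself is not a
// valid ADD/SUB immediate (those are unsigned 12-bit values, optionally
// shifted left by 12), but 1 is, and VIXL can rewrite an ADD of -1 as a
// SUB of 1.
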
inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
  int64_t value = CodeGenerator::GetInt64ValueOf(constant);

  // TODO: Improve this when IsSIMDConstantEncodable method is implemented in VIXL.
  if (instr->IsVecReplicateScalar()) {
    if (constant->IsLongConstant()) {
      return false;
    } else if (constant->IsFloatConstant()) {
      return vixl::aarch64::Assembler::IsImmFP32(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsDoubleConstant()) {
      return vixl::aarch64::Assembler::IsImmFP64(constant->AsDoubleConstant()->GetValue());
    }
    return IsUint<8>(value);
  }

  // Code generation for Min/Max:
  //    Cmp left_op, right_op
  //    Csel dst, left_op, right_op, cond
  if (instr->IsMin() || instr->IsMax()) {
    if (constant->GetUses().HasExactlyOneElement()) {
      // If value can be encoded as immediate for the Cmp, then let VIXL handle
      // the constant generation for the Csel.
      return AddSubCanEncodeAsImmediate(value);
    }
    // These values are encodable as immediates for Cmp and VIXL will use csinc and csinv
    // with the zr register as right_op, hence no constant generation is required.
    return constant->IsZeroBitPattern() || constant->IsOne() || constant->IsMinusOne();
  }

  // For single uses we let VIXL handle the constant generation since it will
  // use registers that are not managed by the register allocator (wip0, wip1).
  if (constant->GetUses().HasExactlyOneElement()) {
    return true;
  }

  // Our code generator ensures shift distances are within an encodable range.
  if (instr->IsRor()) {
    return true;
  }

  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
    // Uses logical operations.
    return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
  } else if (instr->IsNeg()) {
    // Uses mov -immediate.
    return vixl::aarch64::Assembler::IsImmMovn(value, vixl::aarch64::kXRegSize);
  } else {
    DCHECK(instr->IsAdd() ||
           instr->IsIntermediateAddress() ||
           instr->IsBoundsCheck() ||
           instr->IsCompare() ||
           instr->IsCondition() ||
           instr->IsSub())
        << instr->DebugName();
    // Uses aliases of ADD/SUB instructions.
    return AddSubCanEncodeAsImmediate(value);
  }
}

inline Location ARM64EncodableConstantOrRegister(HInstruction* constant, HInstruction* instr) {
  if (constant->IsConstant() && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant);
  }

  return Location::RequiresRegister();
}

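// Typical use when building a LocationSummary (illustrative sketch for a
// hypothetical instruction `add`, following the usual pattern in the arm64
// code generator):
//   locations->SetInAt(1, ARM64EncodableConstantOrRegister(add->InputAt(1), add));
// Encodable constants then stay as immediates instead of tying up a register.
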
// Check if registers in the ART register set have the same register code in
// VIXL. If the register codes are the same, we can initialize a VIXL register
// list simply from the register masks. Currently, only the SP/WSP and XZR/WZR
// codes differ between ART and VIXL.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
                                            size_t num_core,
                                            uint32_t art_fpu_registers,
                                            size_t num_fpu) {
  // The register masks won't work if the number of registers is larger than 32.
  DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
  DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
  for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
    if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
      if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
        return false;
      }
    }
  }
  // There is no register code translation for float registers.
  return true;
}

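// For example, a core register mask containing SP (ART code 31) is not
// coherent, since VIXL maps that code to kSPRegInternalCode; masks made up
// only of ordinary general-purpose registers (x0-x30) are identity-mapped
// and pass the check.
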
inline vixl::aarch64::Shift ShiftFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
    case HDataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
    case HDataProcWithShifterOp::kLSR: return vixl::aarch64::LSR;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
  }
}

inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
    case HDataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
    case HDataProcWithShifterOp::kUXTW: return vixl::aarch64::UXTW;
    case HDataProcWithShifterOp::kSXTB: return vixl::aarch64::SXTB;
    case HDataProcWithShifterOp::kSXTH: return vixl::aarch64::SXTH;
    case HDataProcWithShifterOp::kSXTW: return vixl::aarch64::SXTW;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
  }
}

inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
  // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
  // does *not* support extension. This is because the `extended register` form
  // of the `sub` instruction interprets the left register with code 31 as the
  // stack pointer and not the zero register. (So does the `immediate` form.) In
  // the other form, `shifted register`, the register with code 31 is interpreted
  // as the zero register.
  return instruction->IsAdd() || instruction->IsSub();
}

}  // namespace helpers
}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_