// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_

#include <sstream>
#include <string>

#include "instructions-aarch64.h"
#include "registers-aarch64.h"

namespace vixl {
namespace aarch64 {

// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
        size_(reg1.GetSizeInBits()),
        type_(reg1.GetType()) {
    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type,
             unsigned size,
             unsigned first_reg,
             unsigned last_reg)
      : size_(size), type_(type) {
    VIXL_ASSERT(
        ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
        ((type == CPURegister::kVRegister) &&
         (last_reg < kNumberOfVRegisters)));
    VIXL_ASSERT(last_reg >= first_reg);
    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
    list_ &= ~((UINT64_C(1) << first_reg) - 1);
    VIXL_ASSERT(IsValid());
  }
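
  // Worked example (illustrative only): with first_reg = 19 and last_reg = 28,
  // the two statements above compute
  //   list_  = (1 << 29) - 1          = 0x1fffffff
  //   list_ &= ~((1 << 19) - 1)      -> 0x1ff80000
  // i.e. a mask with bits 19..28 set, selecting x19-x28 for a kRegister list.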

  // Construct an empty CPURegList with the specified size and type. If `size`
  // is CPURegister::kUnknownSize and the register type requires a size, a valid
  // but unspecified default will be picked.
  static CPURegList Empty(CPURegister::RegisterType type,
                          unsigned size = CPURegister::kUnknownSize) {
    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
  }

  // Construct a CPURegList with all possible registers with the specified size
  // and type. If `size` is CPURegister::kUnknownSize and the register type
  // requires a size, a valid but unspecified default will be picked.
  static CPURegList All(CPURegister::RegisterType type,
                        unsigned size = CPURegister::kUnknownSize) {
    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
    if (type == CPURegister::kRegister) {
      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
    }
    return CPURegList(type, GetDefaultSizeFor(type, size), list);
  }
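
  // Construction sketch (illustrative only; x0-x2 and kXRegSize come from
  // registers-aarch64.h and instructions-aarch64.h):
  //
  //   CPURegList gprs(x0, x1, x2);           // {x0, x1, x2}, all X-sized.
  //   CPURegList saved(CPURegister::kRegister, kXRegSize, 19, 28);  // x19-x28.
  //   CPURegList none = CPURegList::Empty(CPURegister::kVRegister);
  //   CPURegList all_v = CPURegList::All(CPURegister::kVRegister);  // v0-v31.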

  CPURegister::RegisterType GetType() const {
    VIXL_ASSERT(IsValid());
    return type_;
  }
  VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
    return GetType();
  }

  CPURegister::RegisterBank GetBank() const {
    return CPURegister::GetBankFor(GetType());
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ |= other.GetList();
  }

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type and size of the registers
  // in the 'other' list must match those in this list.
  void Remove(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ &= ~other.GetList();
  }

  // Variants of Combine and Remove which take a single register.
  void Combine(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Combine(other.GetCode());
  }

  void Remove(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Remove(other.GetCode());
  }

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ |= (UINT64_C(1) << code);
  }

  void Remove(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ &= ~(UINT64_C(1) << code);
  }
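
  // Combine/Remove sketch (illustrative only):
  //
  //   CPURegList scratch(x0, x1);
  //   scratch.Combine(x2);   // {x0, x1, x2}
  //   scratch.Remove(x0);    // {x1, x2}
  //   scratch.Combine(3);    // {x1, x2, x3}; type and size taken from the list.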

  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
  }
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3);
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3,
                          const CPURegList& list_4);

  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
  }
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3);
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3,
                                 const CPURegList& list_4);

  bool Overlaps(const CPURegList& other) const {
    return (type_ == other.type_) && ((list_ & other.list_) != 0);
  }

  RegList GetList() const {
    VIXL_ASSERT(IsValid());
    return list_;
  }
  VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }

  void SetList(RegList new_list) {
    VIXL_ASSERT(IsValid());
    list_ = new_list;
  }
  VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
    return SetList(new_list);
  }

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  // Find the register in this list that appears in `mask` with the lowest or
  // highest code, remove it from the list and return it as a CPURegister. If
  // the list is empty, leave it unchanged and return NoCPUReg.
  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));
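
  // A common pattern (illustrative only) is to drain a list one register at a
  // time, for example when handing out temporaries:
  //
  //   CPURegList temps(x9, x10, x11);
  //   while (!temps.IsEmpty()) {
  //     CPURegister reg = temps.PopLowestIndex();  // x9, then x10, then x11.
  //     ...
  //   }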

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
  // 64-bits being caller-saved.
  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

  bool IsEmpty() const {
    VIXL_ASSERT(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other) const {
    VIXL_ASSERT(IsValid());
    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
  }

  bool IncludesAliasOf(int code) const {
    VIXL_ASSERT(IsValid());
    return (((static_cast<RegList>(1) << code) & list_) != 0);
  }

  int GetCount() const {
    VIXL_ASSERT(IsValid());
    return CountSetBits(list_);
  }
  VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); }

  int GetRegisterSizeInBits() const {
    VIXL_ASSERT(IsValid());
    return size_;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
    return GetRegisterSizeInBits();
  }

  int GetRegisterSizeInBytes() const {
    int size_in_bits = GetRegisterSizeInBits();
    VIXL_ASSERT((size_in_bits % 8) == 0);
    return size_in_bits / 8;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
    return GetRegisterSizeInBytes();
  }

  unsigned GetTotalSizeInBytes() const {
    VIXL_ASSERT(IsValid());
    return GetRegisterSizeInBytes() * GetCount();
  }
  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
    return GetTotalSizeInBytes();
  }

 private:
  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
  // then return an arbitrary-but-valid size.
  //
  // Otherwise, the size is checked for validity and returned unchanged.
  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                    unsigned size) {
    if (size == CPURegister::kUnknownSize) {
      if (type == CPURegister::kRegister) size = kXRegSize;
      if (type == CPURegister::kVRegister) size = kQRegSize;
      // All other types require kUnknownSize.
    }
    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
    return size;
  }

  RegList list_;
  int size_;
  CPURegister::RegisterType type_;

  bool IsValid() const;
};


// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;


// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;
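
// For example (illustrative only; PushCPURegList and PopCPURegList are
// MacroAssembler helpers, not part of this header), a prologue and epilogue
// might save and restore these lists wholesale:
//
//     __ PushCPURegList(kCalleeSaved);
//     __ PushCPURegList(kCalleeSavedV);
//     ...
//     __ PopCPURegList(kCalleeSavedV);
//     __ PopCPURegList(kCalleeSaved);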

class IntegerOperand;

// Operand.
class Operand {
 public:
  // #<immediate>
  // where <immediate> is int64_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(int64_t immediate);  // NOLINT(runtime/explicit)

  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

  // rm, {<shift> #<shift_amount>}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register reg,
          Shift shift = LSL,
          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, {<extend> {#<shift_amount>}}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
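
  // Construction sketch (illustrative only; register names come from
  // registers-aarch64.h):
  //
  //   Operand imm(0x1234);            // #0x1234
  //   Operand plain(x1);              // x1 (LSL #0)
  //   Operand shifted(x2, LSL, 4);    // x2, LSL #4
  //   Operand extended(w3, SXTW, 2);  // w3, SXTW #2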

  bool IsImmediate() const;
  bool IsPlainRegister() const;
  bool IsShiftedRegister() const;
  bool IsExtendedRegister() const;
  bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  Operand ToExtendedRegister() const;

  int64_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return immediate_;
  }
  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
    return GetImmediate();
  }

  int64_t GetEquivalentImmediate() const {
    return IsZero() ? 0 : GetImmediate();
  }

  Register GetRegister() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return reg_;
  }
  VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
  Register GetBaseRegister() const { return GetRegister(); }

  Shift GetShift() const {
    VIXL_ASSERT(IsShiftedRegister());
    return shift_;
  }
  VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }

  Extend GetExtend() const {
    VIXL_ASSERT(IsExtendedRegister());
    return extend_;
  }
  VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }

  unsigned GetShiftAmount() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return shift_amount_;
  }
  VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
    return GetShiftAmount();
  }

 private:
  int64_t immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
 public:
  // Creates an invalid `MemOperand`.
  MemOperand();
  explicit MemOperand(Register base,
                      int64_t offset = 0,
                      AddrMode addrmode = Offset);
  MemOperand(Register base,
             Register regoffset,
             Shift shift = LSL,
             unsigned shift_amount = 0);
  MemOperand(Register base,
             Register regoffset,
             Extend extend,
             unsigned shift_amount = 0);
  MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
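
  // Addressing-mode sketch (illustrative only):
  //
  //   MemOperand(x0)                 // [x0]
  //   MemOperand(x0, 8)              // [x0, #8]
  //   MemOperand(x0, x1, LSL, 3)     // [x0, x1, LSL #3]
  //   MemOperand(x0, w1, UXTW, 2)    // [x0, w1, UXTW #2]
  //   MemOperand(x0, 16, PreIndex)   // [x0, #16]!
  //   MemOperand(x0, 16, PostIndex)  // [x0], #16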

  const Register& GetBaseRegister() const { return base_; }

  // If the MemOperand has a register offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return NoReg.
  const Register& GetRegisterOffset() const { return regoffset_; }

  // If the MemOperand has an immediate offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return 0.
  int64_t GetOffset() const { return offset_; }

  AddrMode GetAddrMode() const { return addrmode_; }
  Shift GetShift() const { return shift_; }
  Extend GetExtend() const { return extend_; }

  unsigned GetShiftAmount() const {
    // Extend modes can also encode a shift for some instructions.
    VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
    return shift_amount_;
  }

  // True for MemOperands which represent something like [x0].
  // Currently, this will also return true for [x0, #0], because MemOperand has
  // no way to distinguish the two.
  bool IsPlainRegister() const;

  // True for MemOperands which represent something like [x0], or for compound
  // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
  // or [x0, wzr, UXTW #3].
  bool IsEquivalentToPlainRegister() const;

  // True for immediate-offset (but not indexed) MemOperands.
  bool IsImmediateOffset() const;
  // True for register-offset (but not indexed) MemOperands.
  bool IsRegisterOffset() const;
  // True for immediate or register pre-indexed MemOperands.
  bool IsPreIndex() const;
  // True for immediate or register post-indexed MemOperands.
  bool IsPostIndex() const;
  // True for immediate pre-indexed MemOperands, [reg, #imm]!
  bool IsImmediatePreIndex() const;
  // True for immediate post-indexed MemOperands, [reg], #imm
  bool IsImmediatePostIndex() const;

  void AddOffset(int64_t offset);

  bool IsValid() const {
    return base_.IsValid() &&
           ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
            (addrmode_ == PostIndex)) &&
           ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
           ((offset_ == 0) || !regoffset_.IsValid());
  }

  bool Equals(const MemOperand& other) const {
    return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
           (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
           (shift_ == other.shift_) && (extend_ == other.extend_) &&
           (shift_amount_ == other.shift_amount_);
  }

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};

// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
 public:
  // "vector-plus-immediate", like [z0.s, #21]
  explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsVectorPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL]
  // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
  //
  // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)` and
  // `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
  // instructions where xm defaults to xzr. However, users should not rely on
  // `SVEMemOperand(x0, 0)` being accepted in such cases.
  explicit SVEMemOperand(Register base,
                         uint64_t offset = 0,
                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(mod),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusImmediate());
    VIXL_ASSERT(IsValid());
  }
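
  // For example (illustrative only):
  //
  //   SVEMemOperand(x0)                 // [x0]
  //   SVEMemOperand(x0, 42)             // [x0, #42]
  //   SVEMemOperand(x0, 3, SVE_MUL_VL)  // [x0, #3, MUL_VL]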

  // "scalar-plus-scalar", like [x0, x1]
  // "scalar-plus-vector", like [x0, z1.d]
  SVEMemOperand(Register base, CPURegister offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-vector", like [x0, z1.d, UXTW]
  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
  // corresponding `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, ZRegister offset, M mod)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(0) {
    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
    VIXL_ASSERT(IsScalarPlusVector());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1, LSL #1]
  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
  // `Shift` or `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, CPURegister offset, M mod, unsigned shift_amount)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
  }

  // "vector-plus-scalar", like [z0.d, x0]
  SVEMemOperand(ZRegister base, Register offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusScalar());
  }

  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
  template <typename M = SVEOffsetModifier>
  SVEMemOperand(ZRegister base,
                ZRegister offset,
                M mod = NO_SVE_OFFSET_MODIFIER,
                unsigned shift_amount = 0)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusVector());
  }
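
  // Register-offset and gather/scatter forms (illustrative only; z0, z1 and
  // the VnD() lane accessor come from registers-aarch64.h):
  //
  //   SVEMemOperand(x0, x1)                    // [x0, x1]
  //   SVEMemOperand(x0, z1.VnD())              // [x0, z1.d]
  //   SVEMemOperand(x0, z1.VnD(), SXTW)        // [x0, z1.d, SXTW]
  //   SVEMemOperand(x0, x1, LSL, 1)            // [x0, x1, LSL #1]
  //   SVEMemOperand(z0.VnD(), x0)              // [z0.d, x0]
  //   SVEMemOperand(z0.VnD(), z1.VnD(), UXTW)  // [z0.d, z1.d, UXTW]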

  // True for SVEMemOperands which represent something like [x0].
  // This will also return true for [x0, #0], because there is no way
  // to distinguish the two.
  bool IsPlainScalar() const {
    return IsScalarPlusImmediate() && (offset_ == 0);
  }

  // True for SVEMemOperands which represent something like [x0], or for
  // compound SVEMemOperands which are functionally equivalent, such as
  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
  bool IsEquivalentToScalar() const;

  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
  // similar.
  bool IsPlainRegister() const;

  bool IsScalarPlusImmediate() const {
    return base_.IsX() && regoffset_.IsNone() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
  }

  bool IsScalarPlusScalar() const {
    // SVE offers no extend modes for scalar-plus-scalar, so both registers must
    // be X registers.
    return base_.IsX() && regoffset_.IsX() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
  }

  bool IsScalarPlusVector() const {
    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
    // in the core ISA, these extend modes do not imply an S-sized lane, so the
    // modifier is independent from the lane size. The architecture describes
    // [US]XTW with a D-sized lane as an "unpacked" offset.
    return base_.IsX() && regoffset_.IsZRegister() &&
           (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
  }

  bool IsVectorPlusImmediate() const {
    return base_.IsZRegister() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
           regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
  }

  bool IsVectorPlusScalar() const {
    return base_.IsZRegister() && regoffset_.IsX() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsVectorPlusVector() const {
    return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
           AreSameFormat(base_, regoffset_) &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsContiguous() const { return !IsScatterGather(); }
  bool IsScatterGather() const {
    return base_.IsZRegister() || regoffset_.IsZRegister();
  }

  // TODO: If necessary, add helpers like `HasScalarBase()`.

  Register GetScalarBase() const {
    VIXL_ASSERT(base_.IsX());
    return Register(base_);
  }

  ZRegister GetVectorBase() const {
    VIXL_ASSERT(base_.IsZRegister());
    VIXL_ASSERT(base_.HasLaneSize());
    return ZRegister(base_);
  }

  Register GetScalarOffset() const {
    VIXL_ASSERT(regoffset_.IsRegister());
    return Register(regoffset_);
  }

  ZRegister GetVectorOffset() const {
    VIXL_ASSERT(regoffset_.IsZRegister());
    VIXL_ASSERT(regoffset_.HasLaneSize());
    return ZRegister(regoffset_);
  }

  int64_t GetImmediateOffset() const {
    VIXL_ASSERT(regoffset_.IsNone());
    return offset_;
  }

  SVEOffsetModifier GetOffsetModifier() const { return mod_; }
  unsigned GetShiftAmount() const { return shift_amount_; }

  bool IsEquivalentToLSL(unsigned amount) const {
    if (shift_amount_ != amount) return false;
    if (amount == 0) {
      // No-shift is equivalent to "LSL #0".
      return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
    }
    return mod_ == SVE_LSL;
  }

  bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

  bool IsValid() const;

 private:
  // Allow standard `Shift` and `Extend` arguments to be used.
  SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
    if (shift == LSL) return SVE_LSL;
    if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other shift.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
    if (extend == UXTW) return SVE_UXTW;
    if (extend == SXTW) return SVE_SXTW;
    if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other extend mode.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
    return mod;
  }

  CPURegister base_;
  CPURegister regoffset_;
  int64_t offset_;
  SVEOffsetModifier mod_;
  unsigned shift_amount_;
};

// Represent a signed or unsigned integer operand.
//
// This is designed to make instructions which naturally accept a _signed_
// immediate easier to implement and use, when we also want users to be able to
// specify raw-bits values (such as with hexadecimal constants). The advantage
// of this class over a simple uint64_t (with implicit C++ sign-extension) is
// that this class can strictly check the range of allowed values. With a simple
// uint64_t, it is impossible to distinguish -1 from UINT64_MAX.
//
// For example, these instructions are equivalent:
//
//     __ Insr(z0.VnB(), -1);
//     __ Insr(z0.VnB(), 0xff);
//
// ... as are these:
//
//     __ Insr(z0.VnD(), -1);
//     __ Insr(z0.VnD(), 0xffffffffffffffff);
//
// ... but this is invalid:
//
//     __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
 public:
#define VIXL_INT_TYPES(V) \
  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(runtime/int)
#define VIXL_DECL_INT_OVERLOADS(T)                                        \
  /* These are allowed to be implicit constructors because this is a */   \
  /* wrapper class that doesn't normally perform any type conversion. */  \
  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */       \
      : raw_bits_(immediate),        /* Allow implicit sign-extension. */ \
        is_negative_(immediate < 0) {}                                    \
  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */     \
      : raw_bits_(immediate), is_negative_(false) {}
  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

  // TODO: `Operand` can currently only hold an int64_t, so some large, unsigned
  // values will be misrepresented here.
  explicit IntegerOperand(const Operand& operand)
      : raw_bits_(operand.GetEquivalentImmediate()),
        is_negative_(operand.GetEquivalentImmediate() < 0) {}

  bool IsIntN(unsigned n) const {
    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                        : vixl::IsIntN(n, raw_bits_);
  }
  bool IsUintN(unsigned n) const {
    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
  }

  bool IsUint8() const { return IsUintN(8); }
  bool IsUint16() const { return IsUintN(16); }
  bool IsUint32() const { return IsUintN(32); }
  bool IsUint64() const { return IsUintN(64); }

  bool IsInt8() const { return IsIntN(8); }
  bool IsInt16() const { return IsIntN(16); }
  bool IsInt32() const { return IsIntN(32); }
  bool IsInt64() const { return IsIntN(64); }

  bool FitsInBits(unsigned n) const {
    return is_negative_ ? IsIntN(n) : IsUintN(n);
  }
  bool FitsInLane(const CPURegister& zd) const {
    return FitsInBits(zd.GetLaneSizeInBits());
  }
  bool FitsInSignedLane(const CPURegister& zd) const {
    return IsIntN(zd.GetLaneSizeInBits());
  }
  bool FitsInUnsignedLane(const CPURegister& zd) const {
    return IsUintN(zd.GetLaneSizeInBits());
  }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
  uint64_t AsUintN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return raw_bits_ & GetUintMask(n);
  }

  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
  uint64_t AsUint64() const { return AsUintN(64); }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
  int64_t AsIntN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
  }

  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
  int64_t AsInt64() const { return AsIntN(64); }
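
  // For example (illustrative only):
  //   IntegerOperand(-1).AsUint8()    -> 0xff  (two's complement mapping)
  //   IntegerOperand(-1).AsInt8()     -> -1
  //   IntegerOperand(0xff).AsUint8()  -> 0xff  (already a non-negative value)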
811*f5c631daSSadaf Ebrahimi 
812*f5c631daSSadaf Ebrahimi   // Several instructions encode a signed int<N>_t, which is then (optionally)
813*f5c631daSSadaf Ebrahimi   // left-shifted and sign-extended to a Z register lane with a size which may
814*f5c631daSSadaf Ebrahimi   // be larger than N. This helper tries to find an int<N>_t such that the
815*f5c631daSSadaf Ebrahimi   // IntegerOperand's arithmetic value is reproduced in each lane.
816*f5c631daSSadaf Ebrahimi   //
817*f5c631daSSadaf Ebrahimi   // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
818*f5c631daSSadaf Ebrahimi   // `Insr(z0.VnB(), -1)`.
819*f5c631daSSadaf Ebrahimi   template <unsigned N, unsigned kShift, typename T>
TryEncodeAsShiftedIntNForLane(const CPURegister & zd,T * imm)820*f5c631daSSadaf Ebrahimi   bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
821*f5c631daSSadaf Ebrahimi     VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
822*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(FitsInLane(zd));
823*f5c631daSSadaf Ebrahimi     if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;
824*f5c631daSSadaf Ebrahimi 
825*f5c631daSSadaf Ebrahimi     // Reverse the specified left-shift.
826*f5c631daSSadaf Ebrahimi     IntegerOperand unshifted(*this);
827*f5c631daSSadaf Ebrahimi     unshifted.ArithmeticShiftRight(kShift);
828*f5c631daSSadaf Ebrahimi 
829*f5c631daSSadaf Ebrahimi     if (unshifted.IsIntN(N)) {
830*f5c631daSSadaf Ebrahimi       // This is trivial, since sign-extension produces the same arithmetic
831*f5c631daSSadaf Ebrahimi       // value irrespective of the destination size.
832*f5c631daSSadaf Ebrahimi       *imm = static_cast<T>(unshifted.AsIntN(N));
833*f5c631daSSadaf Ebrahimi       return true;
834*f5c631daSSadaf Ebrahimi     }
835*f5c631daSSadaf Ebrahimi 
836*f5c631daSSadaf Ebrahimi     // Otherwise, we might be able to use the sign-extension to produce the
837*f5c631daSSadaf Ebrahimi     // desired bit pattern. We can only do this for values in the range
838*f5c631daSSadaf Ebrahimi     // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign bit.
839*f5c631daSSadaf Ebrahimi     //
840*f5c631daSSadaf Ebrahimi     // The lane size has to be adjusted to compensate for `kShift`, since the
841*f5c631daSSadaf Ebrahimi     // high bits will be dropped when the encoded value is left-shifted.
842*f5c631daSSadaf Ebrahimi     if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
843*f5c631daSSadaf Ebrahimi       int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
844*f5c631daSSadaf Ebrahimi       if (vixl::IsIntN(N, encoded)) {
845*f5c631daSSadaf Ebrahimi         *imm = static_cast<T>(encoded);
846*f5c631daSSadaf Ebrahimi         return true;
847*f5c631daSSadaf Ebrahimi       }
848*f5c631daSSadaf Ebrahimi     }
849*f5c631daSSadaf Ebrahimi     return false;
850*f5c631daSSadaf Ebrahimi   }
851*f5c631daSSadaf Ebrahimi 
852*f5c631daSSadaf Ebrahimi   // As above, but `kShift` is written to the `*shift` parameter on success, so
853*f5c631daSSadaf Ebrahimi   // that it is easy to chain calls like this:
854*f5c631daSSadaf Ebrahimi   //
855*f5c631daSSadaf Ebrahimi   //     if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
856*f5c631daSSadaf Ebrahimi   //         imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
857*f5c631daSSadaf Ebrahimi   //       insn(zd, imm8, shift)
858*f5c631daSSadaf Ebrahimi   //     }
859*f5c631daSSadaf Ebrahimi   template <unsigned N, unsigned kShift, typename T, typename S>
860*f5c631daSSadaf Ebrahimi   bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
861*f5c631daSSadaf Ebrahimi                                      T* imm,
862*f5c631daSSadaf Ebrahimi                                      S* shift) const {
863*f5c631daSSadaf Ebrahimi     if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
864*f5c631daSSadaf Ebrahimi       *shift = kShift;
865*f5c631daSSadaf Ebrahimi       return true;
866*f5c631daSSadaf Ebrahimi     }
867*f5c631daSSadaf Ebrahimi     return false;
868*f5c631daSSadaf Ebrahimi   }
869*f5c631daSSadaf Ebrahimi 
870*f5c631daSSadaf Ebrahimi   // As above, but assume that `kShift` is 0.
871*f5c631daSSadaf Ebrahimi   template <unsigned N, typename T>
872*f5c631daSSadaf Ebrahimi   bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
873*f5c631daSSadaf Ebrahimi     return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
874*f5c631daSSadaf Ebrahimi   }
875*f5c631daSSadaf Ebrahimi 
876*f5c631daSSadaf Ebrahimi   // As above, but for unsigned fields. This is usually a simple operation, but
877*f5c631daSSadaf Ebrahimi   // is provided for symmetry.
878*f5c631daSSadaf Ebrahimi   template <unsigned N, unsigned kShift, typename T>
879*f5c631daSSadaf Ebrahimi   bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
880*f5c631daSSadaf Ebrahimi     VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
881*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(FitsInLane(zd));
882*f5c631daSSadaf Ebrahimi 
883*f5c631daSSadaf Ebrahimi     // TODO: Should we convert -1 to 0xff here?
884*f5c631daSSadaf Ebrahimi     if (is_negative_) return false;
885*f5c631daSSadaf Ebrahimi     USE(zd);
886*f5c631daSSadaf Ebrahimi 
887*f5c631daSSadaf Ebrahimi     if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;
888*f5c631daSSadaf Ebrahimi 
889*f5c631daSSadaf Ebrahimi     if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
890*f5c631daSSadaf Ebrahimi       *imm = static_cast<T>(raw_bits_ >> kShift);
891*f5c631daSSadaf Ebrahimi       return true;
892*f5c631daSSadaf Ebrahimi     }
893*f5c631daSSadaf Ebrahimi     return false;
894*f5c631daSSadaf Ebrahimi   }
895*f5c631daSSadaf Ebrahimi 
896*f5c631daSSadaf Ebrahimi   template <unsigned N, unsigned kShift, typename T, typename S>
897*f5c631daSSadaf Ebrahimi   bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
898*f5c631daSSadaf Ebrahimi                                       T* imm,
899*f5c631daSSadaf Ebrahimi                                       S* shift) const {
900*f5c631daSSadaf Ebrahimi     if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
901*f5c631daSSadaf Ebrahimi       *shift = kShift;
902*f5c631daSSadaf Ebrahimi       return true;
903*f5c631daSSadaf Ebrahimi     }
904*f5c631daSSadaf Ebrahimi     return false;
905*f5c631daSSadaf Ebrahimi   }
906*f5c631daSSadaf Ebrahimi 
907*f5c631daSSadaf Ebrahimi   bool IsZero() const { return raw_bits_ == 0; }
908*f5c631daSSadaf Ebrahimi   bool IsNegative() const { return is_negative_; }
909*f5c631daSSadaf Ebrahimi   bool IsPositiveOrZero() const { return !is_negative_; }
910*f5c631daSSadaf Ebrahimi 
911*f5c631daSSadaf Ebrahimi   uint64_t GetMagnitude() const {
912*f5c631daSSadaf Ebrahimi     return is_negative_ ? -raw_bits_ : raw_bits_;
913*f5c631daSSadaf Ebrahimi   }
914*f5c631daSSadaf Ebrahimi 
915*f5c631daSSadaf Ebrahimi  private:
916*f5c631daSSadaf Ebrahimi   // Shift the arithmetic value right, with sign extension if is_negative_.
917*f5c631daSSadaf Ebrahimi   void ArithmeticShiftRight(int shift) {
918*f5c631daSSadaf Ebrahimi     VIXL_ASSERT((shift >= 0) && (shift < 64));
919*f5c631daSSadaf Ebrahimi     if (shift == 0) return;
920*f5c631daSSadaf Ebrahimi     if (is_negative_) {
921*f5c631daSSadaf Ebrahimi       raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
922*f5c631daSSadaf Ebrahimi     } else {
923*f5c631daSSadaf Ebrahimi       raw_bits_ >>= shift;
924*f5c631daSSadaf Ebrahimi     }
925*f5c631daSSadaf Ebrahimi   }
926*f5c631daSSadaf Ebrahimi 
927*f5c631daSSadaf Ebrahimi   uint64_t raw_bits_;
928*f5c631daSSadaf Ebrahimi   bool is_negative_;
929*f5c631daSSadaf Ebrahimi };
930*f5c631daSSadaf Ebrahimi 
931*f5c631daSSadaf Ebrahimi // This is an abstraction that can represent a register or memory location. The
932*f5c631daSSadaf Ebrahimi // `MacroAssembler` provides helpers to move data between generic operands.
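//
// A minimal usage sketch (assuming the MacroAssembler's
// Move(GenericOperand, GenericOperand) helper; `masm` is a hypothetical
// MacroAssembler and the stack slot is assumed to be valid):
//
//     GenericOperand dst(x0);
//     GenericOperand src(MemOperand(sp, 8), kXRegSizeInBytes);
//     masm.Move(dst, src);  // Copy the 8-byte stack slot into x0.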
933*f5c631daSSadaf Ebrahimi class GenericOperand {
934*f5c631daSSadaf Ebrahimi  public:
935*f5c631daSSadaf Ebrahimi   GenericOperand() { VIXL_ASSERT(!IsValid()); }
936*f5c631daSSadaf Ebrahimi   GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
937*f5c631daSSadaf Ebrahimi   GenericOperand(const MemOperand& mem_op,
938*f5c631daSSadaf Ebrahimi                  size_t mem_op_size = 0);  // NOLINT(runtime/explicit)
939*f5c631daSSadaf Ebrahimi 
940*f5c631daSSadaf Ebrahimi   bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
941*f5c631daSSadaf Ebrahimi 
942*f5c631daSSadaf Ebrahimi   bool Equals(const GenericOperand& other) const;
943*f5c631daSSadaf Ebrahimi 
944*f5c631daSSadaf Ebrahimi   bool IsCPURegister() const {
945*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(IsValid());
946*f5c631daSSadaf Ebrahimi     return cpu_register_.IsValid();
947*f5c631daSSadaf Ebrahimi   }
948*f5c631daSSadaf Ebrahimi 
949*f5c631daSSadaf Ebrahimi   bool IsRegister() const {
950*f5c631daSSadaf Ebrahimi     return IsCPURegister() && cpu_register_.IsRegister();
951*f5c631daSSadaf Ebrahimi   }
952*f5c631daSSadaf Ebrahimi 
953*f5c631daSSadaf Ebrahimi   bool IsVRegister() const {
954*f5c631daSSadaf Ebrahimi     return IsCPURegister() && cpu_register_.IsVRegister();
955*f5c631daSSadaf Ebrahimi   }
956*f5c631daSSadaf Ebrahimi 
957*f5c631daSSadaf Ebrahimi   bool IsSameCPURegisterType(const GenericOperand& other) {
958*f5c631daSSadaf Ebrahimi     return IsCPURegister() && other.IsCPURegister() &&
959*f5c631daSSadaf Ebrahimi            GetCPURegister().IsSameType(other.GetCPURegister());
960*f5c631daSSadaf Ebrahimi   }
961*f5c631daSSadaf Ebrahimi 
962*f5c631daSSadaf Ebrahimi   bool IsMemOperand() const {
963*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(IsValid());
964*f5c631daSSadaf Ebrahimi     return mem_op_.IsValid();
965*f5c631daSSadaf Ebrahimi   }
966*f5c631daSSadaf Ebrahimi 
967*f5c631daSSadaf Ebrahimi   CPURegister GetCPURegister() const {
968*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(IsCPURegister());
969*f5c631daSSadaf Ebrahimi     return cpu_register_;
970*f5c631daSSadaf Ebrahimi   }
971*f5c631daSSadaf Ebrahimi 
972*f5c631daSSadaf Ebrahimi   MemOperand GetMemOperand() const {
973*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(IsMemOperand());
974*f5c631daSSadaf Ebrahimi     return mem_op_;
975*f5c631daSSadaf Ebrahimi   }
976*f5c631daSSadaf Ebrahimi 
977*f5c631daSSadaf Ebrahimi   size_t GetMemOperandSizeInBytes() const {
978*f5c631daSSadaf Ebrahimi     VIXL_ASSERT(IsMemOperand());
979*f5c631daSSadaf Ebrahimi     return mem_op_size_;
980*f5c631daSSadaf Ebrahimi   }
981*f5c631daSSadaf Ebrahimi 
982*f5c631daSSadaf Ebrahimi   size_t GetSizeInBytes() const {
983*f5c631daSSadaf Ebrahimi     return IsCPURegister() ? cpu_register_.GetSizeInBytes()
984*f5c631daSSadaf Ebrahimi                            : GetMemOperandSizeInBytes();
985*f5c631daSSadaf Ebrahimi   }
986*f5c631daSSadaf Ebrahimi 
987*f5c631daSSadaf Ebrahimi   size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
988*f5c631daSSadaf Ebrahimi 
989*f5c631daSSadaf Ebrahimi  private:
990*f5c631daSSadaf Ebrahimi   CPURegister cpu_register_;
991*f5c631daSSadaf Ebrahimi   MemOperand mem_op_;
992*f5c631daSSadaf Ebrahimi   // The size of the memory region pointed to, in bytes.
993*f5c631daSSadaf Ebrahimi   // We only support sizes up to X/D register sizes.
994*f5c631daSSadaf Ebrahimi   size_t mem_op_size_;
995*f5c631daSSadaf Ebrahimi };
996*f5c631daSSadaf Ebrahimi }
997*f5c631daSSadaf Ebrahimi }  // namespace vixl::aarch64
998*f5c631daSSadaf Ebrahimi 
999*f5c631daSSadaf Ebrahimi #endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_