/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_riscv64.h"

#include "base/bit_utils_iterator.h"
#include "dwarf/register.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc_root.h"
#include "indirect_reference_table.h"
#include "lock_word.h"
#include "managed_register_riscv64.h"
#include "offsets.h"
#include "stack_reference.h"
#include "thread.h"

namespace art HIDDEN {
namespace riscv64 {

static constexpr size_t kSpillSize = 8;  // Both GPRs and FPRs

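// Split the callee-save register list into separate bit masks for core (X) and
// floating-point (F) registers, as consumed by the spill/fill loops below.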
static std::pair<uint32_t, uint32_t> GetCoreAndFpSpillMasks(
    ArrayRef<const ManagedRegister> callee_save_regs) {
  uint32_t core_spill_mask = 0u;
  uint32_t fp_spill_mask = 0u;
  for (ManagedRegister r : callee_save_regs) {
    Riscv64ManagedRegister reg = r.AsRiscv64();
    if (reg.IsXRegister()) {
      core_spill_mask |= 1u << reg.AsXRegister();
    } else {
      DCHECK(reg.IsFRegister());
      fp_spill_mask |= 1u << reg.AsFRegister();
    }
  }
  DCHECK_EQ(callee_save_regs.size(),
            dchecked_integral_cast<size_t>(POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask)));
  return {core_spill_mask, fp_spill_mask};
}

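// All `__` emissions below forward to the wrapped `Riscv64Assembler` in `asm_`.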
#define __ asm_.

Riscv64JNIMacroAssembler::~Riscv64JNIMacroAssembler() {
}

void Riscv64JNIMacroAssembler::FinalizeCode() {
  __ FinalizeCode();
}

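// Frame layout: the ArtMethod* is stored at SP + 0 (when `method_reg` is given) and the
// callee-saves are stored at the top of the frame: RA highest, then the other core registers,
// then the FP registers, each group with lower register numbers at lower offsets.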
void Riscv64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                          ManagedRegister method_reg,
                                          ArrayRef<const ManagedRegister> callee_save_regs) {
  // Increase frame to required size.
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  // Must at least have space for Method* if we're going to spill it.
  DCHECK_GE(frame_size,
            (callee_save_regs.size() + (method_reg.IsRegister() ? 1u : 0u)) * kSpillSize);
  IncreaseFrameSize(frame_size);

  // Save callee-saves.
  auto [core_spill_mask, fp_spill_mask] = GetCoreAndFpSpillMasks(callee_save_regs);
  size_t offset = frame_size;
  if ((core_spill_mask & (1u << RA)) != 0u) {
    offset -= kSpillSize;
    __ Stored(RA, SP, offset);
    __ cfi().RelOffset(dwarf::Reg::Riscv64Core(RA), offset);
  }
  for (uint32_t reg : HighToLowBits(core_spill_mask & ~(1u << RA))) {
    offset -= kSpillSize;
    __ Stored(enum_cast<XRegister>(reg), SP, offset);
    __ cfi().RelOffset(dwarf::Reg::Riscv64Core(enum_cast<XRegister>(reg)), offset);
  }
  for (uint32_t reg : HighToLowBits(fp_spill_mask)) {
    offset -= kSpillSize;
    __ FStored(enum_cast<FRegister>(reg), SP, offset);
    __ cfi().RelOffset(dwarf::Reg::Riscv64Fp(enum_cast<FRegister>(reg)), offset);
  }

  if (method_reg.IsRegister()) {
    // Write ArtMethod*.
    DCHECK_EQ(A0, method_reg.AsRiscv64().AsXRegister());
    __ Stored(A0, SP, 0);
  }
}

void Riscv64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                           ArrayRef<const ManagedRegister> callee_save_regs,
                                           [[maybe_unused]] bool may_suspend) {
  cfi().RememberState();

  // Restore callee-saves.
  auto [core_spill_mask, fp_spill_mask] = GetCoreAndFpSpillMasks(callee_save_regs);
  size_t offset = frame_size - callee_save_regs.size() * kSpillSize;
  for (uint32_t reg : LowToHighBits(fp_spill_mask)) {
    __ FLoadd(enum_cast<FRegister>(reg), SP, offset);
    __ cfi().Restore(dwarf::Reg::Riscv64Fp(enum_cast<FRegister>(reg)));
    offset += kSpillSize;
  }
  for (uint32_t reg : LowToHighBits(core_spill_mask & ~(1u << RA))) {
    __ Loadd(enum_cast<XRegister>(reg), SP, offset);
    __ cfi().Restore(dwarf::Reg::Riscv64Core(enum_cast<XRegister>(reg)));
    offset += kSpillSize;
  }
  if ((core_spill_mask & (1u << RA)) != 0u) {
    __ Loadd(RA, SP, offset);
    __ cfi().Restore(dwarf::Reg::Riscv64Core(RA));
    offset += kSpillSize;
  }
  DCHECK_EQ(offset, frame_size);

  // Decrease the frame size.
  DecreaseFrameSize(frame_size);

  // Return to RA.
  __ Ret();

  // The CFI should be restored for any code that follows the exit block.
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(frame_size);
}

void Riscv64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kStackAlignment);
    int64_t adjustment = dchecked_integral_cast<int64_t>(adjust);
    __ AddConst64(SP, SP, -adjustment);
    __ cfi().AdjustCFAOffset(adjustment);
  }
}

void Riscv64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kStackAlignment);
    int64_t adjustment = dchecked_integral_cast<int64_t>(adjust);
    __ AddConst64(SP, SP, adjustment);
    __ cfi().AdjustCFAOffset(-adjustment);
  }
}

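// Narrow values are kept in full X registers on riscv64, so the same register serves as
// both the 4-byte and the 8-byte view.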
ManagedRegister Riscv64JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister src, size_t size) {
  DCHECK(src.AsRiscv64().IsXRegister());
  DCHECK(size == 4u || size == 8u) << size;
  return src;
}

void Riscv64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Store(Riscv64ManagedRegister::FromXRegister(SP), MemberOffset(offs.Int32Value()), m_src, size);
}

void Riscv64JNIMacroAssembler::Store(ManagedRegister m_base,
                                     MemberOffset offs,
                                     ManagedRegister m_src,
                                     size_t size) {
  Riscv64ManagedRegister base = m_base.AsRiscv64();
  Riscv64ManagedRegister src = m_src.AsRiscv64();
  if (src.IsXRegister()) {
    if (size == 4u) {
      __ Storew(src.AsXRegister(), base.AsXRegister(), offs.Int32Value());
    } else {
      CHECK_EQ(8u, size);
      __ Stored(src.AsXRegister(), base.AsXRegister(), offs.Int32Value());
    }
  } else {
    CHECK(src.IsFRegister()) << src;
    if (size == 4u) {
      __ FStorew(src.AsFRegister(), base.AsXRegister(), offs.Int32Value());
    } else {
      CHECK_EQ(8u, size);
      __ FStored(src.AsFRegister(), base.AsXRegister(), offs.Int32Value());
    }
  }
}

void Riscv64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Riscv64ManagedRegister sp = Riscv64ManagedRegister::FromXRegister(SP);
  Store(sp, MemberOffset(offs.Int32Value()), m_src, static_cast<size_t>(kRiscv64PointerSize));
}

void Riscv64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 offs, bool tag_sp) {
  XRegister src = SP;
  ScratchRegisterScope srs(&asm_);
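  // If requested, store a tagged copy of SP (bit 1 set) instead of SP itself; the tag marks
  // this kind of transition for the runtime code that later reads the thread field back.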
  if (tag_sp) {
    XRegister tmp = srs.AllocateXRegister();
    __ Ori(tmp, SP, 0x2);
    src = tmp;
  }
  __ Stored(src, TR, offs.Int32Value());
}

void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest, FrameOffset offs, size_t size) {
  Riscv64ManagedRegister sp = Riscv64ManagedRegister::FromXRegister(SP);
  Load(m_dest, sp, MemberOffset(offs.Int32Value()), size);
}

void Riscv64JNIMacroAssembler::Load(ManagedRegister m_dest,
                                    ManagedRegister m_base,
                                    MemberOffset offs,
                                    size_t size) {
  Riscv64ManagedRegister base = m_base.AsRiscv64();
  Riscv64ManagedRegister dest = m_dest.AsRiscv64();
  if (dest.IsXRegister()) {
    if (size == 4u) {
      // The riscv64 native calling convention specifies that integers narrower than XLEN (64)
      // bits are "widened according to the sign of their type up to 32 bits, then sign-extended
      // to XLEN bits." The managed ABI already passes integral values this way in registers
      // and correctly widened to 32 bits on the stack. The `Load()` must sign-extend narrower
      // types here to pass integral values correctly to the native call.
      // For `float` args, the upper 32 bits are undefined, so this is fine for them as well.
      __ Loadw(dest.AsXRegister(), base.AsXRegister(), offs.Int32Value());
    } else {
      CHECK_EQ(8u, size);
      __ Loadd(dest.AsXRegister(), base.AsXRegister(), offs.Int32Value());
    }
  } else {
    CHECK(dest.IsFRegister()) << dest;
    if (size == 4u) {
      __ FLoadw(dest.AsFRegister(), base.AsXRegister(), offs.Int32Value());
    } else {
      CHECK_EQ(8u, size);
      __ FLoadd(dest.AsFRegister(), base.AsXRegister(), offs.Int32Value());
    }
  }
}

void Riscv64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dest, ThreadOffset64 offs) {
  Riscv64ManagedRegister tr = Riscv64ManagedRegister::FromXRegister(TR);
  Load(m_dest, tr, MemberOffset(offs.Int32Value()), static_cast<size_t>(kRiscv64PointerSize));
}

void Riscv64JNIMacroAssembler::LoadGcRootWithoutReadBarrier(ManagedRegister m_dest,
                                                            ManagedRegister m_base,
                                                            MemberOffset offs) {
  Riscv64ManagedRegister base = m_base.AsRiscv64();
  Riscv64ManagedRegister dest = m_dest.AsRiscv64();
  static_assert(sizeof(uint32_t) == sizeof(GcRoot<mirror::Object>));
  __ Loadwu(dest.AsXRegister(), base.AsXRegister(), offs.Int32Value());
}

void Riscv64JNIMacroAssembler::LoadStackReference(ManagedRegister m_dest, FrameOffset offs) {
  // `StackReference<>` and `GcRoot<>` have the same underlying representation, namely
  // `CompressedReference<>`. And `StackReference<>` does not need a read barrier.
  static_assert(sizeof(uint32_t) == sizeof(mirror::CompressedReference<mirror::Object>));
  static_assert(sizeof(uint32_t) == sizeof(StackReference<mirror::Object>));
  static_assert(sizeof(uint32_t) == sizeof(GcRoot<mirror::Object>));
  LoadGcRootWithoutReadBarrier(
      m_dest, Riscv64ManagedRegister::FromXRegister(SP), MemberOffset(offs.Int32Value()));
}

void Riscv64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                             ArrayRef<ArgumentLocation> srcs,
                                             ArrayRef<FrameOffset> refs) {
  size_t arg_count = dests.size();
  DCHECK_EQ(arg_count, srcs.size());
  DCHECK_EQ(arg_count, refs.size());

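  // Encode each argument register as a bit of a single 64-bit mask: core registers occupy
  // bits 0-31 and FP registers occupy bits 32-63.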
  auto get_mask = [](ManagedRegister reg) -> uint64_t {
    Riscv64ManagedRegister riscv64_reg = reg.AsRiscv64();
    if (riscv64_reg.IsXRegister()) {
      size_t core_reg_number = static_cast<size_t>(riscv64_reg.AsXRegister());
      DCHECK_LT(core_reg_number, 32u);
      return UINT64_C(1) << core_reg_number;
    } else {
      DCHECK(riscv64_reg.IsFRegister());
      size_t fp_reg_number = static_cast<size_t>(riscv64_reg.AsFRegister());
      DCHECK_LT(fp_reg_number, 32u);
      return (UINT64_C(1) << 32u) << fp_reg_number;
    }
  };

  // Collect registers to move while storing/copying args to stack slots.
  // Convert processed references to `jobject`.
  uint64_t src_regs = 0u;
  uint64_t dest_regs = 0u;
  for (size_t i = 0; i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    const FrameOffset ref = refs[i];
    if (ref != kInvalidReferenceOffset) {
      DCHECK_EQ(src.GetSize(), kObjectReferenceSize);
      DCHECK_EQ(dest.GetSize(), static_cast<size_t>(kRiscv64PointerSize));
    } else {
      DCHECK(src.GetSize() == 4u || src.GetSize() == 8u) << src.GetSize();
      DCHECK(dest.GetSize() == 4u || dest.GetSize() == 8u) << dest.GetSize();
      DCHECK_LE(src.GetSize(), dest.GetSize());
    }
    if (dest.IsRegister()) {
      if (src.IsRegister() && src.GetRegister().Equals(dest.GetRegister())) {
        // No move is necessary but we may need to convert a reference to a `jobject`.
        if (ref != kInvalidReferenceOffset) {
          CreateJObject(dest.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ i != 0u);
        }
      } else {
        if (src.IsRegister()) {
          src_regs |= get_mask(src.GetRegister());
        }
        dest_regs |= get_mask(dest.GetRegister());
      }
    } else {
      ScratchRegisterScope srs(&asm_);
      Riscv64ManagedRegister reg = src.IsRegister()
          ? src.GetRegister().AsRiscv64()
          : Riscv64ManagedRegister::FromXRegister(srs.AllocateXRegister());
      if (!src.IsRegister()) {
        if (ref != kInvalidReferenceOffset) {
          // We're loading the reference only for comparison with null, so it does not matter
          // if we sign- or zero-extend but let's correctly zero-extend the reference anyway.
          __ Loadwu(reg.AsRiscv64().AsXRegister(), SP, src.GetFrameOffset().SizeValue());
        } else {
          Load(reg, src.GetFrameOffset(), src.GetSize());
        }
      }
      if (ref != kInvalidReferenceOffset) {
        DCHECK_NE(i, 0u);
        CreateJObject(reg, ref, reg, /*null_allowed=*/ true);
      }
      Store(dest.GetFrameOffset(), reg, dest.GetSize());
    }
  }

  // Fill destination registers.
  // There should be no cycles, so this simple algorithm should make progress.
  while (dest_regs != 0u) {
    uint64_t old_dest_regs = dest_regs;
    for (size_t i = 0; i != arg_count; ++i) {
      const ArgumentLocation& src = srcs[i];
      const ArgumentLocation& dest = dests[i];
      const FrameOffset ref = refs[i];
      if (!dest.IsRegister()) {
        continue;  // Stored in first loop above.
      }
      uint64_t dest_reg_mask = get_mask(dest.GetRegister());
      if ((dest_reg_mask & dest_regs) == 0u) {
        continue;  // Equals source, or already filled in one of previous iterations.
      }
      if ((dest_reg_mask & src_regs) != 0u) {
        continue;  // Cannot clobber this register yet.
      }
      if (src.IsRegister()) {
        if (ref != kInvalidReferenceOffset) {
          DCHECK_NE(i, 0u);  // The `this` arg remains in the same register (handled above).
          CreateJObject(dest.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ true);
        } else {
          Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
        }
        src_regs &= ~get_mask(src.GetRegister());  // Allow clobbering source register.
      } else {
        Load(dest.GetRegister(), src.GetFrameOffset(), src.GetSize());
        // No `jobject` conversion needed. There are enough arg registers in managed ABI
        // to hold all references that yield a register arg `jobject` in native ABI.
        DCHECK_EQ(ref, kInvalidReferenceOffset);
      }
      dest_regs &= ~get_mask(dest.GetRegister());  // Destination register was filled.
    }
    CHECK_NE(old_dest_regs, dest_regs);
    DCHECK_EQ(0u, dest_regs & ~old_dest_regs);
  }
}

void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, ManagedRegister m_src, size_t size) {
  // Note: This function is used only for moving between GPRs.
  // FP argument registers hold the same arguments in managed and native ABIs.
  DCHECK(size == 4u || size == 8u) << size;
  Riscv64ManagedRegister dest = m_dest.AsRiscv64();
  Riscv64ManagedRegister src = m_src.AsRiscv64();
  DCHECK(dest.IsXRegister());
  DCHECK(src.IsXRegister());
  if (!dest.Equals(src)) {
    __ Mv(dest.AsXRegister(), src.AsXRegister());
  }
}

void Riscv64JNIMacroAssembler::Move(ManagedRegister m_dest, size_t value) {
  DCHECK(m_dest.AsRiscv64().IsXRegister());
  __ LoadConst64(m_dest.AsRiscv64().AsXRegister(), dchecked_integral_cast<int64_t>(value));
}

void Riscv64JNIMacroAssembler::SignExtend([[maybe_unused]] ManagedRegister mreg,
                                          [[maybe_unused]] size_t size) {
  LOG(FATAL) << "The result is already sign-extended in the native ABI.";
  UNREACHABLE();
}

void Riscv64JNIMacroAssembler::ZeroExtend([[maybe_unused]] ManagedRegister mreg,
                                          [[maybe_unused]] size_t size) {
  LOG(FATAL) << "The result is already zero-extended in the native ABI.";
  UNREACHABLE();
}

void Riscv64JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  DCHECK(dest.AsRiscv64().IsXRegister());
  __ Mv(dest.AsRiscv64().AsXRegister(), TR);
}

void Riscv64JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
  __ Stored(TR, SP, offset.Int32Value());
}

void Riscv64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister m_reg,
                                                                 JNIMacroLabel* slow_path,
                                                                 JNIMacroLabel* resume) {
  // This implements the fast-path of `Thread::DecodeJObject()`.
  constexpr int64_t kGlobalOrWeakGlobalMask = IndirectReferenceTable::GetGlobalOrWeakGlobalMask();
  DCHECK(IsInt<12>(kGlobalOrWeakGlobalMask));
  constexpr int64_t kIndirectRefKindMask = IndirectReferenceTable::GetIndirectRefKindMask();
  DCHECK(IsInt<12>(kIndirectRefKindMask));
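  // A null reference stays null; global and weak global references take the slow path;
  // for local and JNI-transition references, clear the kind bits and load the referenced object.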
  XRegister reg = m_reg.AsRiscv64().AsXRegister();
  __ Beqz(reg, Riscv64JNIMacroLabel::Cast(resume)->AsRiscv64());  // Skip test and load for null.
  __ Andi(TMP, reg, kGlobalOrWeakGlobalMask);
  __ Bnez(TMP, Riscv64JNIMacroLabel::Cast(slow_path)->AsRiscv64());
  __ Andi(reg, reg, ~kIndirectRefKindMask);
  __ Loadwu(reg, reg, 0);
}

void Riscv64JNIMacroAssembler::VerifyObject([[maybe_unused]] ManagedRegister m_src,
                                            [[maybe_unused]] bool could_be_null) {
  // TODO: not validating references.
}

void Riscv64JNIMacroAssembler::VerifyObject([[maybe_unused]] FrameOffset src,
                                            [[maybe_unused]] bool could_be_null) {
  // TODO: not validating references.
}

void Riscv64JNIMacroAssembler::Jump(ManagedRegister m_base, Offset offs) {
  Riscv64ManagedRegister base = m_base.AsRiscv64();
  CHECK(base.IsXRegister()) << base;
  ScratchRegisterScope srs(&asm_);
  XRegister tmp = srs.AllocateXRegister();
  __ Loadd(tmp, base.AsXRegister(), offs.Int32Value());
  __ Jr(tmp);
}

void Riscv64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs) {
  Riscv64ManagedRegister base = m_base.AsRiscv64();
  CHECK(base.IsXRegister()) << base;
  __ Loadd(RA, base.AsXRegister(), offs.Int32Value());
  __ Jalr(RA);
}


void Riscv64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset) {
  Call(Riscv64ManagedRegister::FromXRegister(TR), offset);
}

void Riscv64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
    JNIMacroLabel* label,
    ArrayRef<const ManagedRegister> scratch_regs) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset64 thread_flags_offset = Thread::ThreadFlagsOffset<kRiscv64PointerSize>();
  constexpr ThreadOffset64 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kRiscv64PointerSize>(kMutatorLock);

  DCHECK_GE(scratch_regs.size(), 2u);
  XRegister scratch = scratch_regs[0].AsRiscv64().AsXRegister();
  XRegister scratch2 = scratch_regs[1].AsRiscv64().AsXRegister();

  // CAS release, old_value = kRunnableStateValue, new_value = kNativeStateValue, no flags.
  Riscv64Label retry;
  __ Bind(&retry);
  static_assert(thread_flags_offset.Int32Value() == 0);  // LR/SC require exact address.
  __ LrW(scratch, TR, AqRl::kNone);
  {
    ScopedLrScExtensionsRestriction slser(&asm_);
    __ Li(scratch2, kNativeStateValue);
    // If any flags are set, go to the slow path.
    static_assert(kRunnableStateValue == 0u);
    __ Bnez(scratch, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
  }
  __ ScW(scratch, scratch2, TR, AqRl::kRelease);
  __ Bnez(scratch, &retry);

  // Clear `self->tlsPtr_.held_mutexes[kMutatorLock]`.
  __ Stored(Zero, TR, thread_held_mutex_mutator_lock_offset.Int32Value());
}

void Riscv64JNIMacroAssembler::TryToTransitionFromNativeToRunnable(
    JNIMacroLabel* label,
    ArrayRef<const ManagedRegister> scratch_regs,
    ManagedRegister return_reg) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset64 thread_flags_offset = Thread::ThreadFlagsOffset<kRiscv64PointerSize>();
  constexpr ThreadOffset64 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kRiscv64PointerSize>(kMutatorLock);
  constexpr ThreadOffset64 thread_mutator_lock_offset =
      Thread::MutatorLockOffset<kRiscv64PointerSize>();

  DCHECK_GE(scratch_regs.size(), 2u);
  DCHECK(!scratch_regs[0].AsRiscv64().Overlaps(return_reg.AsRiscv64()));
  XRegister scratch = scratch_regs[0].AsRiscv64().AsXRegister();
  DCHECK(!scratch_regs[1].AsRiscv64().Overlaps(return_reg.AsRiscv64()));
  XRegister scratch2 = scratch_regs[1].AsRiscv64().AsXRegister();

  // CAS acquire, old_value = kNativeStateValue, new_value = kRunnableStateValue, no flags.
  Riscv64Label retry;
  __ Bind(&retry);
  static_assert(thread_flags_offset.Int32Value() == 0);  // LR/SC require exact address.
  __ LrW(scratch, TR, AqRl::kAcquire);
  {
    ScopedLrScExtensionsRestriction slser(&asm_);
    __ Li(scratch2, kNativeStateValue);
    // If any flags are set, or the state is not Native, go to the slow path.
    // (While the thread can theoretically transition between different Suspended states,
    // it would be very unexpected to see a state other than Native at this point.)
    __ Bne(scratch, scratch2, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
  }
  static_assert(kRunnableStateValue == 0u);
  __ ScW(scratch, Zero, TR, AqRl::kNone);
  __ Bnez(scratch, &retry);

  // Set `self->tlsPtr_.held_mutexes[kMutatorLock]` to the mutator lock.
  __ Loadd(scratch, TR, thread_mutator_lock_offset.Int32Value());
  __ Stored(scratch, TR, thread_held_mutex_mutator_lock_offset.Int32Value());
}

void Riscv64JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
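  // Branch to `label` if any suspend or checkpoint request flag is set for the current thread.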
  ScratchRegisterScope srs(&asm_);
  XRegister tmp = srs.AllocateXRegister();
  __ Loadw(tmp, TR, Thread::ThreadFlagsOffset<kRiscv64PointerSize>().Int32Value());
  DCHECK(IsInt<12>(dchecked_integral_cast<int32_t>(Thread::SuspendOrCheckpointRequestFlags())));
  __ Andi(tmp, tmp, dchecked_integral_cast<int32_t>(Thread::SuspendOrCheckpointRequestFlags()));
  __ Bnez(tmp, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
}

void Riscv64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
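  // Branch to `label` if the current thread has a pending exception.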
  ScratchRegisterScope srs(&asm_);
  XRegister tmp = srs.AllocateXRegister();
  __ Loadd(tmp, TR, Thread::ExceptionOffset<kRiscv64PointerSize>().Int32Value());
  __ Bnez(tmp, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
}

void Riscv64JNIMacroAssembler::DeliverPendingException() {
  // Pass exception object as argument.
  // Don't care about preserving A0 as this won't return.
  // Note: The scratch register from `ExceptionPoll()` may have been clobbered.
  __ Loadd(A0, TR, Thread::ExceptionOffset<kRiscv64PointerSize>().Int32Value());
  __ Loadd(RA, TR, QUICK_ENTRYPOINT_OFFSET(kRiscv64PointerSize, pDeliverException).Int32Value());
  __ Jalr(RA);
  // Call should never return.
  __ Unimp();
}

std::unique_ptr<JNIMacroLabel> Riscv64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) Riscv64JNIMacroLabel());
}

void Riscv64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ J(down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
}

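// Branch to `label` depending on the thread-local `is_gc_marking` flag and the given condition.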
void Riscv64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);

  ScratchRegisterScope srs(&asm_);
  XRegister test_reg = srs.AllocateXRegister();
  int32_t is_gc_marking_offset = Thread::IsGcMarkingOffset<kRiscv64PointerSize>().Int32Value();
  __ Loadw(test_reg, TR, is_gc_marking_offset);
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      __ Beqz(test_reg, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
      break;
    case JNIMacroUnaryCondition::kNotZero:
      __ Bnez(test_reg, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
      break;
  }
}

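// Branch to `label` depending on the mark bit state in the object's lock word and the
// given condition.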
void Riscv64JNIMacroAssembler::TestMarkBit(ManagedRegister m_ref,
                                           JNIMacroLabel* label,
                                           JNIMacroUnaryCondition cond) {
  XRegister ref = m_ref.AsRiscv64().AsXRegister();
  ScratchRegisterScope srs(&asm_);
  XRegister tmp = srs.AllocateXRegister();
  __ Loadw(tmp, ref, mirror::Object::MonitorOffset().Int32Value());
  // Move the bit we want to check to the sign bit, so that we can use BGEZ/BLTZ
  // to check it. Extracting the bit for BEQZ/BNEZ would require one more instruction.
  static_assert(LockWord::kMarkBitStateSize == 1u);
  __ Slliw(tmp, tmp, 31 - LockWord::kMarkBitStateShift);
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      __ Bgez(tmp, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
      break;
    case JNIMacroUnaryCondition::kNotZero:
      __ Bltz(tmp, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
      break;
  }
}

void Riscv64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
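  // Split the absolute address into a sign-adjusted 12-bit offset and the remaining bits,
  // so that the byte can be read with a single Lb from the materialized base address.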
  int32_t small_offset = dchecked_integral_cast<int32_t>(address & 0xfff) -
                         dchecked_integral_cast<int32_t>((address & 0x800) << 1);
  int64_t remainder = static_cast<int64_t>(address) - small_offset;
  ScratchRegisterScope srs(&asm_);
  XRegister tmp = srs.AllocateXRegister();
  __ LoadConst64(tmp, remainder);
  __ Lb(tmp, tmp, small_offset);
  __ Bnez(tmp, down_cast<Riscv64Label*>(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64()));
}

void Riscv64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
}

void Riscv64JNIMacroAssembler::CreateJObject(ManagedRegister m_dest,
                                             FrameOffset spilled_reference_offset,
                                             ManagedRegister m_ref,
                                             bool null_allowed) {
  Riscv64ManagedRegister dest = m_dest.AsRiscv64();
  Riscv64ManagedRegister ref = m_ref.AsRiscv64();
  DCHECK(dest.IsXRegister());
  DCHECK(ref.IsXRegister());

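  // With `null_allowed`, the result is null for a null reference and the address of the
  // spilled reference otherwise; without it, the result is always that address.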
  Riscv64Label null_label;
  if (null_allowed) {
    if (!dest.Equals(ref)) {
      __ Li(dest.AsXRegister(), 0);
    }
    __ Beqz(ref.AsXRegister(), &null_label);
  }
  __ AddConst64(dest.AsXRegister(), SP, spilled_reference_offset.Int32Value());
  if (null_allowed) {
    __ Bind(&null_label);
  }
}

#undef __

}  // namespace riscv64
}  // namespace art