/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "indirect_reference_table.h"
#include "lock_word.h"
#include "thread.h"
#include "utils/assembler.h"

namespace art HIDDEN {
namespace x86 {

static Register GetScratchRegister() {
  // ECX is an argument register on entry and gets spilled in BuildFrame().
  // After that, we can use it as a scratch register.
  return ECX;
}

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 4;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);

#define __ asm_.

void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
                                      ManagedRegister method_reg,
                                      ArrayRef<const ManagedRegister> spill_regs) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(4);  // Return address on stack.
  if (frame_size == kFramePointerSize) {
    // For @CriticalNative tail call.
    CHECK(method_reg.IsNoRegister());
    CHECK(spill_regs.empty());
  } else if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  int gpr_count = 0;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ pushl(spill);
    gpr_count++;
    cfi().AdjustCFAOffset(kFramePointerSize);
    cfi().RelOffset(DWARFReg(spill), 0);
  }

  // return address then method on stack.
  int32_t adjust = frame_size - gpr_count * kFramePointerSize -
                   kFramePointerSize /*return address*/ -
                   (method_reg.IsRegister() ? kFramePointerSize /*method*/ : 0u);
  if (adjust != 0) {
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
  if (method_reg.IsRegister()) {
    __ pushl(method_reg.AsX86().AsCpuRegister());
    cfi().AdjustCFAOffset(kFramePointerSize);
  }
  DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
}

void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                       ArrayRef<const ManagedRegister> spill_regs,
                                       [[maybe_unused]] bool may_suspend) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  // -kFramePointerSize for ArtMethod*.
  int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
  if (adjust != 0) {
    __ addl(ESP, Immediate(adjust));
    cfi().AdjustCFAOffset(-adjust);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ popl(spill);
    cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
    cfi().Restore(DWARFReg(spill));
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
}

static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addl(ESP, Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

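// Core registers on x86 are always 32-bit, so only a 4-byte view is supported and the
// register is returned unchanged.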
ManagedRegister X86JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister src, size_t size) {
  DCHECK(src.AsX86().IsCpuRegister());
  DCHECK_EQ(size, 4u);
  return src;
}

void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(&asm_, adjust);
}

void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  Store(X86ManagedRegister::FromCpuRegister(ESP), MemberOffset(offs.Int32Value()), msrc, size);
}

void X86JNIMacroAssembler::Store(ManagedRegister mbase,
                                 MemberOffset offs,
                                 ManagedRegister msrc,
                                 size_t size) {
  X86ManagedRegister base = mbase.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(Address(base.AsCpuRegister(), offs), src.AsCpuRegister());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(Address(base.AsCpuRegister(), offs), src.AsRegisterPairLow());
    __ movl(Address(base.AsCpuRegister(), FrameOffset(offs.Int32Value() + 4)),
            src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(base.AsCpuRegister(), offs));
    } else {
      __ fstpl(Address(base.AsCpuRegister(), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
    }
  }
}

void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
  if (tag_sp) {
    // There is no free register, store contents onto stack and restore back later.
    Register scratch = ECX;
    __ movl(Address(ESP, -32), scratch);
    __ movl(scratch, ESP);
    __ orl(scratch, Immediate(0x2));
    __ fs()->movl(Address::Absolute(thr_offs), scratch);
    __ movl(scratch, Address(ESP, -32));
  } else {
    __ fs()->movl(Address::Absolute(thr_offs), ESP);
  }
}

void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  Load(mdest, X86ManagedRegister::FromCpuRegister(ESP), MemberOffset(src.Int32Value()), size);
}

void X86JNIMacroAssembler::Load(ManagedRegister mdest,
                                ManagedRegister mbase,
                                MemberOffset offs,
                                size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister base = mbase.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(dest.AsRegisterPairLow(), Address(base.AsCpuRegister(), offs));
    __ movl(dest.AsRegisterPairHigh(),
            Address(base.AsCpuRegister(), FrameOffset(offs.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(base.AsCpuRegister(), offs));
    } else {
      __ fldl(Address(base.AsCpuRegister(), offs));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
    }
  }
}

void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
}

void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

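// Move JNI call arguments from their managed locations to their native locations in two
// passes: first store register arguments to stack slots (or move the single "hidden arg"
// allowed in a register), then copy stack arguments; processed references are converted
// to `jobject` along the way.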
void X86JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                         ArrayRef<ArgumentLocation> srcs,
                                         ArrayRef<FrameOffset> refs) {
  size_t arg_count = dests.size();
  DCHECK_EQ(arg_count, srcs.size());
  DCHECK_EQ(arg_count, refs.size());

  // Store register args to stack slots. Convert processed references to `jobject`.
  bool found_hidden_arg = false;
  for (size_t i = 0; i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    const FrameOffset ref = refs[i];
    DCHECK_EQ(src.GetSize(), dest.GetSize());  // Even for references.
    if (src.IsRegister()) {
      if (UNLIKELY(dest.IsRegister())) {
        if (dest.GetRegister().Equals(src.GetRegister())) {
          // JNI compiler sometimes adds a no-op move.
          continue;
        }
        // Native ABI has only stack arguments but we may pass one "hidden arg" in register.
        CHECK(!found_hidden_arg);
        found_hidden_arg = true;
        DCHECK_EQ(ref, kInvalidReferenceOffset);
        DCHECK(
            !dest.GetRegister().Equals(X86ManagedRegister::FromCpuRegister(GetScratchRegister())));
        Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
      } else {
        if (ref != kInvalidReferenceOffset) {
          // Note: We can clobber `src` here as the register cannot hold more than one argument.
          // This overload of `CreateJObject()` currently does not use the scratch
          // register ECX, so this shall not clobber another argument.
          CreateJObject(src.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ i != 0u);
        }
        Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
      }
    } else {
      // Delay copying until we have spilled all registers, including the scratch register ECX.
    }
  }

  // Copy incoming stack args. Convert processed references to `jobject`.
  for (size_t i = 0; i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    const FrameOffset ref = refs[i];
    DCHECK_EQ(src.GetSize(), dest.GetSize());  // Even for references.
    if (!src.IsRegister()) {
      DCHECK(!dest.IsRegister());
      if (ref != kInvalidReferenceOffset) {
        DCHECK_EQ(srcs[i].GetFrameOffset(), refs[i]);
        CreateJObject(dest.GetFrameOffset(), ref, /*null_allowed=*/ i != 0u);
      } else {
        Copy(dest.GetFrameOffset(), src.GetFrameOffset(), dest.GetSize());
      }
    }
  }
}

void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  DCHECK(!mdest.Equals(X86ManagedRegister::FromCpuRegister(GetScratchRegister())));
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      IncreaseFrameSize(16);
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(ESP, 0));
        __ movss(dest.AsXmmRegister(), Address(ESP, 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(ESP, 0));
        __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
      }
      DecreaseFrameSize(16);
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86JNIMacroAssembler::Move(ManagedRegister mdest, size_t value) {
  X86ManagedRegister dest = mdest.AsX86();
  __ movl(dest.AsCpuRegister(), Immediate(value));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
  DCHECK(size == 4 || size == 8) << size;
  Register scratch = GetScratchRegister();
  __ movl(scratch, Address(ESP, src));
  __ movl(Address(ESP, dest), scratch);
  if (size == 8) {
    __ movl(scratch, Address(ESP, FrameOffset(src.Int32Value() + 4)));
    __ movl(Address(ESP, FrameOffset(dest.Int32Value() + 4)), scratch);
  }
}

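// Convert a spilled reference into a `jobject` argument in a register: pass the stack
// address of the spilled reference, or null if `null_allowed` and the reference is null.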
void X86JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
                                         FrameOffset spilled_reference_offset,
                                         ManagedRegister min_reg,
                                         bool null_allowed) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(out_reg.AsCpuRegister(), Address(ESP, spilled_reference_offset));
  }
}

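// Same conversion as above, but the resulting `jobject` is written to a stack slot.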
void X86JNIMacroAssembler::CreateJObject(FrameOffset out_off,
                                         FrameOffset spilled_reference_offset,
                                         bool null_allowed) {
  Register scratch = GetScratchRegister();
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch, Address(ESP, spilled_reference_offset));
    __ testl(scratch, scratch);
    __ j(kZero, &null_arg);
    __ leal(scratch, Address(ESP, spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(scratch, Address(ESP, spilled_reference_offset));
  }
  __ movl(Address(ESP, out_off), scratch);
}

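// Decode a local or JNI transition reference in place: global and weak global references
// (identified by their kind bits) are left to the slow path; otherwise clear the kind bits
// and, unless the reference is null, load the referenced object through the pointer.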
void X86JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
                                                             JNIMacroLabel* slow_path,
                                                             JNIMacroLabel* resume) {
  constexpr uint32_t kGlobalOrWeakGlobalMask =
      dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetGlobalOrWeakGlobalMask());
  constexpr uint32_t kIndirectRefKindMask =
      dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetIndirectRefKindMask());
  __ testl(reg.AsX86().AsCpuRegister(), Immediate(kGlobalOrWeakGlobalMask));
  __ j(kNotZero, X86JNIMacroLabel::Cast(slow_path)->AsX86());
  __ andl(reg.AsX86().AsCpuRegister(), Immediate(~kIndirectRefKindMask));
  __ j(kZero, X86JNIMacroLabel::Cast(resume)->AsX86());  // Skip load for null.
  __ movl(reg.AsX86().AsCpuRegister(), Address(reg.AsX86().AsCpuRegister(), /*disp=*/ 0));
}

void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset) {
  __ fs()->call(Address::Absolute(offset));
}

void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  __ fs()->movl(dest.AsX86().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}

void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
  Register scratch = GetScratchRegister();
  __ fs()->movl(scratch, Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
  __ movl(Address(ESP, offset), scratch);
}

void X86JNIMacroAssembler::TryToTransitionFromRunnableToNative(
    JNIMacroLabel* label, ArrayRef<const ManagedRegister> scratch_regs) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset32 thread_flags_offset = Thread::ThreadFlagsOffset<kX86PointerSize>();
  constexpr ThreadOffset32 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kX86PointerSize>(kMutatorLock);

  // We need to preserve managed argument EAX.
  DCHECK_GE(scratch_regs.size(), 2u);
  Register saved_eax = scratch_regs[0].AsX86().AsCpuRegister();
  Register scratch = scratch_regs[1].AsX86().AsCpuRegister();

  // CAS release, old_value = kRunnableStateValue, new_value = kNativeStateValue, no flags.
  __ movl(saved_eax, EAX);  // Save EAX.
  static_assert(kRunnableStateValue == 0u);
  __ xorl(EAX, EAX);
  __ movl(scratch, Immediate(kNativeStateValue));
  __ fs()->LockCmpxchgl(Address::Absolute(thread_flags_offset.Uint32Value()), scratch);
  // LOCK CMPXCHG has full barrier semantics, so we don't need barriers here.
  __ movl(EAX, saved_eax);  // Restore EAX; MOV does not change flags.
  // If any flags are set, go to the slow path.
  __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86());

  // Clear `self->tlsPtr_.held_mutexes[kMutatorLock]`.
  __ fs()->movl(Address::Absolute(thread_held_mutex_mutator_lock_offset.Uint32Value()),
                Immediate(0));
}

void X86JNIMacroAssembler::TryToTransitionFromNativeToRunnable(
    JNIMacroLabel* label,
    ArrayRef<const ManagedRegister> scratch_regs,
    ManagedRegister return_reg) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset32 thread_flags_offset = Thread::ThreadFlagsOffset<kX86PointerSize>();
  constexpr ThreadOffset32 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kX86PointerSize>(kMutatorLock);
  constexpr ThreadOffset32 thread_mutator_lock_offset =
      Thread::MutatorLockOffset<kX86PointerSize>();

  size_t scratch_index = 0u;
  auto get_scratch_reg = [&]() {
    while (true) {
      DCHECK_LT(scratch_index, scratch_regs.size());
      X86ManagedRegister scratch_reg = scratch_regs[scratch_index].AsX86();
      ++scratch_index;
      DCHECK(!scratch_reg.Overlaps(return_reg.AsX86()));
      if (scratch_reg.AsCpuRegister() != EAX) {
        return scratch_reg.AsCpuRegister();
      }
    }
  };
  Register scratch = get_scratch_reg();
  bool preserve_eax = return_reg.AsX86().Overlaps(X86ManagedRegister::FromCpuRegister(EAX));
  Register saved_eax = preserve_eax ? get_scratch_reg() : kNoRegister;

  // CAS acquire, old_value = kNativeStateValue, new_value = kRunnableStateValue, no flags.
  if (preserve_eax) {
    __ movl(saved_eax, EAX);  // Save EAX.
  }
  __ movl(EAX, Immediate(kNativeStateValue));
  static_assert(kRunnableStateValue == 0u);
  __ xorl(scratch, scratch);
  __ fs()->LockCmpxchgl(Address::Absolute(thread_flags_offset.Uint32Value()), scratch);
  // LOCK CMPXCHG has full barrier semantics, so we don't need barriers here.
  if (preserve_eax) {
    __ movl(EAX, saved_eax);  // Restore EAX; MOV does not change flags.
  }
  // If any flags are set, or the state is not Native, go to the slow path.
  // (While the thread can theoretically transition between different Suspended states,
  // it would be very unexpected to see a state other than Native at this point.)
  __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86());

  // Set `self->tlsPtr_.held_mutexes[kMutatorLock]` to the mutator lock.
  __ fs()->movl(scratch, Address::Absolute(thread_mutator_lock_offset.Uint32Value()));
  __ fs()->movl(Address::Absolute(thread_held_mutex_mutator_lock_offset.Uint32Value()),
                scratch);
}

void X86JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
  __ fs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>()),
                 Immediate(Thread::SuspendOrCheckpointRequestFlags()));
  __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86());
}

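// Branch to the slow path if the current thread has a pending exception.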
void X86JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
  __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
  __ j(kNotEqual, X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::DeliverPendingException() {
  // Pass exception as argument in EAX
  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
  // this call should never return
  __ int3();
}

std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) X86JNIMacroLabel());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
}

static Condition UnaryConditionToX86Condition(JNIMacroUnaryCondition cond) {
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      return kZero;
    case JNIMacroUnaryCondition::kNotZero:
      return kNotZero;
  }
}

void X86JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  // CMP self->tls32_.is_gc_marking, 0
  // Jcc <Offset>
  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
  __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>()), Immediate(0));
  __ j(UnaryConditionToX86Condition(cond), X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::TestMarkBit(ManagedRegister mref,
                                       JNIMacroLabel* label,
                                       JNIMacroUnaryCondition cond) {
  DCHECK(kUseBakerReadBarrier);
  Register ref = mref.AsX86().AsCpuRegister();
  static_assert(LockWord::kMarkBitStateSize == 1u);
  __ testl(Address(ref, mirror::Object::MonitorOffset().SizeValue()),
           Immediate(LockWord::kMarkBitStateMaskShifted));
  __ j(UnaryConditionToX86Condition(cond), X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
  __ cmpb(Address::Absolute(address), Immediate(0));
  __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
}

#undef __

}  // namespace x86
}  // namespace art