/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_
#define BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_

#include <cstdint>
#include <optional>
#include <tuple>
#include <type_traits>

#include "berberis/assembler/x86_64.h"
#include "berberis/base/checks.h"
#include "berberis/base/dependent_false.h"
#include "berberis/guest_state/guest_state.h"
#include "berberis/intrinsics/guest_cpu_flags.h"
#include "berberis/intrinsics/intrinsics_process_bindings.h"
#include "berberis/intrinsics/macro_assembler.h"
#include "berberis/runtime_primitives/platform.h"

namespace berberis::inline_intrinsic {

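// Attempts to expand the intrinsic kFunction inline instead of generating an
// out-of-line runtime call. Returns true if inline expansion succeeded;
// otherwise the caller is expected to fall back to a regular intrinsic call.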
template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                        RegAlloc&& reg_alloc,
                        SIMDRegAlloc&& simd_reg_alloc,
                        AssemblerResType result,
                        AssemblerArgType... args);

template <auto kFunc>
class InlineIntrinsic {
 public:
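  // RISC-V floating-point instructions carry an explicit rounding mode. When it is
  // FPFlags::DYN the operation follows the dynamic rounding mode, so the intrinsic can be
  // replaced with its *HostRounding counterpart, which relies on the rounding mode currently
  // configured on the host. Static rounding modes are not handled here; those cases return
  // false and take the generic path.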
  template <typename RegAlloc, typename SIMDRegAlloc, typename ResType, typename... ArgType>
  static bool TryInlineWithHostRounding(MacroAssembler<x86_64::Assembler>& as,
                                        RegAlloc&& reg_alloc,
                                        SIMDRegAlloc&& simd_reg_alloc,
                                        ResType result,
                                        ArgType... args) {
    std::tuple args_tuple = std::make_tuple(args...);
    if constexpr (IsTagEq<&intrinsics::FMul<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FMulHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FMul<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FMulHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FAdd<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FAddHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FAdd<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FAddHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FSub<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FSubHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FSub<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FSubHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FDiv<intrinsics::Float64>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FDivHostRounding<intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FDiv<intrinsics::Float32>>) {
      auto [rm, frm, src1, src2] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<&intrinsics::FDivHostRounding<intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src1, src2);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int64_t, intrinsics::Float64>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int64_t, intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int64_t, intrinsics::Float32>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int64_t, intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int32_t, intrinsics::Float64>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int32_t, intrinsics::Float64>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    } else if constexpr (IsTagEq<&intrinsics::FCvtFloatToInteger<int32_t, intrinsics::Float32>>) {
      auto [rm, frm, src] = args_tuple;
      if (rm != FPFlags::DYN) {
        return false;
      }
      return TryInlineIntrinsic<
          &intrinsics::FCvtFloatToIntegerHostRounding<int32_t, intrinsics::Float32>>(
          as, reg_alloc, simd_reg_alloc, result, src);
    }
    return false;
  }

 private:
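  // kFunc and kOtherFunction may point to functions with different signatures, so they cannot
  // be compared with == directly. Wrapping each in FunctionCompareTag turns the comparison into
  // a well-formed type check: the tags are the same type exactly when the two non-type template
  // arguments match.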
  template <auto kFunction>
  class FunctionCompareTag;

  template <auto kOtherFunction>
  static constexpr bool IsTagEq =
      std::is_same_v<FunctionCompareTag<kFunc>, FunctionCompareTag<kOtherFunction>>;
};

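// The two Mov overloads below are selected via SFINAE in the trailing return type: the first
// participates in overload resolution when the macro assembler provides Mov<format> for the
// operand pair, the second when it provides Movs<format> (scalar SSE moves). Both prefer the
// VEX-encoded (Vmov*) form when the host supports AVX.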
template <typename format, typename DestType, typename SrcType>
auto Mov(MacroAssembler<x86_64::Assembler>& as, DestType dest, SrcType src)
    -> decltype(std::declval<MacroAssembler<x86_64::Assembler>>()
                    .Mov<format>(std::declval<DestType>(), std::declval<SrcType>())) {
  if constexpr (std::is_integral_v<format>) {
    return as.template Mov<format>(dest, src);
  } else if (host_platform::kHasAVX) {
    return as.template Vmov<format>(dest, src);
  } else {
    return as.template Mov<format>(dest, src);
  }
}

template <typename format, typename DestType, typename SrcType>
auto Mov(MacroAssembler<x86_64::Assembler>& as, DestType dest, SrcType src)
    -> decltype(std::declval<MacroAssembler<x86_64::Assembler>>()
                    .Movs<format>(std::declval<DestType>(), std::declval<SrcType>())) {
  if (host_platform::kHasAVX) {
    if constexpr (std::is_same_v<DestType, MacroAssembler<x86_64::Assembler>::XMMRegister> &&
                  std::is_same_v<SrcType, MacroAssembler<x86_64::Assembler>::XMMRegister>) {
      // Register-to-register VMOVSS/VMOVSD takes three operands: the upper bits of the
      // destination are merged from the second operand, so pass dest twice to leave them
      // unchanged.
      return as.template Vmovs<format>(dest, dest, src);
    } else {
      return as.template Vmovs<format>(dest, src);
    }
  } else {
    return as.template Movs<format>(dest, src);
  }
}

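// Expands an intrinsic from its machine-generated binding description. An instance serves as a
// callback in two roles, distinguished by the operator() overloads below: as the
// ProcessBindingsClient it checks CPUID restrictions, emits the bound macro instruction, and
// moves outputs into place; as the MakeTuplefromBindingsClient it produces the assembler
// operand(s) for each bound argument. The conversion to bool reports whether a usable binding
// was found.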
template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
class TryBindingBasedInlineIntrinsic {
  template <auto kFunctionForFriend,
            typename RegAllocForFriend,
            typename SIMDRegAllocForFriend,
            typename AssemblerResTypeForFriend,
            typename... AssemblerArgTypeForFriend>
  friend bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                                 RegAllocForFriend&& reg_alloc,
                                 SIMDRegAllocForFriend&& simd_reg_alloc,
                                 AssemblerResTypeForFriend result,
                                 AssemblerArgTypeForFriend... args);
  template <auto kFunc,
            typename MacroAssembler,
            typename Result,
            typename Callback,
            typename... Args>
  friend Result intrinsics::bindings::ProcessBindings(Callback callback,
                                                      Result def_result,
                                                      Args&&... args);
  template <auto kIntrinsicTemplateName,
            auto kMacroInstructionTemplateName,
            auto kMnemo,
            typename GetOpcode,
            typename kCPUIDRestrictionTemplateValue,
            typename kPreciseNanOperationsHandlingTemplateValue,
            bool kSideEffectsTemplateValue,
            typename... Types>
  friend class intrinsics::bindings::AsmCallInfo;

  TryBindingBasedInlineIntrinsic() = delete;
  TryBindingBasedInlineIntrinsic(const TryBindingBasedInlineIntrinsic&) = delete;
  TryBindingBasedInlineIntrinsic(TryBindingBasedInlineIntrinsic&&) = default;
  TryBindingBasedInlineIntrinsic& operator=(const TryBindingBasedInlineIntrinsic&) = delete;
  TryBindingBasedInlineIntrinsic& operator=(TryBindingBasedInlineIntrinsic&&) = default;

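  // The constructor does all the work: ProcessBindings walks the bindings registered for
  // kFunction and invokes the ProcessBindingsClient operator() below for each candidate.
  // success_ records the outcome; the default result, used when no binding matches, is false.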
  TryBindingBasedInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                                 RegAlloc& reg_alloc,
                                 SIMDRegAlloc& simd_reg_alloc,
                                 AssemblerResType result,
                                 AssemblerArgType... args)
      : as_(as),
        reg_alloc_(reg_alloc),
        simd_reg_alloc_(simd_reg_alloc),
        result_{result},
        input_args_(std::tuple{args...}),
        success_(intrinsics::bindings::ProcessBindings<
                 kFunction,
                 typename MacroAssembler<x86_64::Assembler>::MacroAssemblers,
                 bool,
                 TryBindingBasedInlineIntrinsic&>(*this, false)) {}
  operator bool() { return success_; }

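  // ProcessBindingsClient role: called for each candidate binding. Verifies that the host CPU
  // satisfies the binding's CPUID restriction, emits the bound macro instruction, then copies
  // the output into the result register, sign-extending 32-bit integer results to 64 bits as
  // RISC-V register semantics require.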
  template <typename AsmCallInfo>
  std::optional<bool> /*ProcessBindingsClient*/ operator()(AsmCallInfo asm_call_info) {
    static_assert(std::is_same_v<decltype(kFunction), typename AsmCallInfo::IntrinsicType>);
    static_assert(std::is_same_v<typename AsmCallInfo::PreciseNanOperationsHandling,
                                 intrinsics::bindings::NoNansOperation>);
    using CPUIDRestriction = AsmCallInfo::CPUIDRestriction;
    if constexpr (std::is_same_v<CPUIDRestriction, intrinsics::bindings::HasAVX>) {
      if (!host_platform::kHasAVX) {
        return false;
      }
    } else if constexpr (std::is_same_v<CPUIDRestriction, intrinsics::bindings::HasBMI>) {
      if (!host_platform::kHasBMI) {
        return false;
      }
    } else if constexpr (std::is_same_v<CPUIDRestriction, intrinsics::bindings::HasLZCNT>) {
      if (!host_platform::kHasLZCNT) {
        return false;
      }
    } else if constexpr (std::is_same_v<CPUIDRestriction, intrinsics::bindings::HasPOPCNT>) {
      if (!host_platform::kHasPOPCNT) {
        return false;
      }
    } else if constexpr (std::is_same_v<CPUIDRestriction,
                                        intrinsics::bindings::NoCPUIDRestriction>) {
      // No restrictions. Do nothing.
    } else {
      static_assert(kDependentTypeFalse<CPUIDRestriction>);
    }
    std::apply(
        AsmCallInfo::kMacroInstruction,
        std::tuple_cat(std::tuple<MacroAssembler<x86_64::Assembler>&>{as_},
                       AsmCallInfo::template MakeTuplefromBindings<TryBindingBasedInlineIntrinsic&>(
                           *this, asm_call_info)));
    if constexpr (std::tuple_size_v<typename AsmCallInfo::OutputArguments> == 0) {
      // No return value. Do nothing.
    } else if constexpr (std::tuple_size_v<typename AsmCallInfo::OutputArguments> == 1) {
      using ReturnType = std::tuple_element_t<0, typename AsmCallInfo::OutputArguments>;
      if constexpr (std::is_integral_v<ReturnType>) {
        if (result_reg_ != x86_64::Assembler::no_register) {
          Mov<ReturnType>(as_, result_, result_reg_);
          CHECK_EQ(result_xmm_reg_, x86_64::Assembler::no_xmm_register);
        } else if (result_xmm_reg_ != x86_64::Assembler::no_xmm_register) {
          Mov<typename TypeTraits<ReturnType>::Float>(as_, result_, result_xmm_reg_);
          CHECK_EQ(result_reg_, x86_64::Assembler::no_register);
        }
      } else {
        CHECK_EQ(result_reg_, x86_64::Assembler::no_register);
        CHECK_EQ(result_xmm_reg_, x86_64::Assembler::no_xmm_register);
      }
      if constexpr (std::is_integral_v<ReturnType> && sizeof(ReturnType) < sizeof(std::int32_t)) {
        // Don't handle these types just yet. We are not sure how to expand them and there
        // are no examples.
        static_assert(kDependentTypeFalse<ReturnType>);
      }
      if constexpr (std::is_same_v<ReturnType, int32_t> || std::is_same_v<ReturnType, uint32_t>) {
        // Expand 32-bit values as signed, even if the actual results are processed as unsigned!
        as_.Expand<int64_t, std::make_signed_t<ReturnType>>(result_, result_);
      } else if constexpr (std::is_integral_v<ReturnType> &&
                           sizeof(ReturnType) == sizeof(std::int64_t)) {
        // Do nothing, we have already produced an expanded value.
      } else if constexpr (std::is_same_v<ReturnType, intrinsics::Float32> ||
                           std::is_same_v<ReturnType, intrinsics::Float64>) {
        // Do nothing, NaN boxing is handled by the semantics player.
      } else {
        static_assert(kDependentTypeFalse<ReturnType>);
      }
    } else {
      static_assert(kDependentTypeFalse<typename AsmCallInfo::OutputArguments>);
    }
    return {true};
  }

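  // MakeTuplefromBindingsClient role: called once per argument binding; returns a tuple with
  // the assembler operand(s) for that argument. SIMD ('x') register classes are served from the
  // SIMD register allocator, everything else from the general-purpose one.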
  template <typename ArgBinding, typename AsmCallInfo>
  auto /*MakeTuplefromBindingsClient*/ operator()(ArgTraits<ArgBinding>, AsmCallInfo) {
    static constexpr const auto& arg_info = ArgTraits<ArgBinding>::arg_info;
    if constexpr (arg_info.arg_type == ArgInfo::IMM_ARG) {
      return ProcessArgInput<ArgBinding, AsmCallInfo>(reg_alloc_);
    } else {
      using RegisterClass = typename ArgTraits<ArgBinding>::RegisterClass;
      if constexpr (RegisterClass::kAsRegister == 'x') {
        return ProcessArgInput<ArgBinding, AsmCallInfo>(simd_reg_alloc_);
      } else {
        return ProcessArgInput<ArgBinding, AsmCallInfo>(reg_alloc_);
      }
    }
  }

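  // Materializes one argument according to its binding kind: immediates are passed through;
  // IN/IN_OUT/IN_TMP arguments are copied into a freshly allocated (or implicit rax/rcx)
  // register; OUT/OUT_TMP arguments record where the result will appear; TMP_ARG either
  // allocates a register or, for the 'm' (memory) class, hands out a slot in the per-thread
  // intrinsics scratch area.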
  template <typename ArgBinding, typename AsmCallInfo, typename RegAllocForArg>
  auto ProcessArgInput(RegAllocForArg&& reg_alloc) {
    static constexpr const auto& arg_info = ArgTraits<ArgBinding>::arg_info;
    if constexpr (arg_info.arg_type == ArgInfo::IMM_ARG) {
      return std::tuple{std::get<arg_info.from>(input_args_)};
    } else {
      using RegisterClass = typename ArgTraits<ArgBinding>::RegisterClass;
      using Usage = typename ArgTraits<ArgBinding>::Usage;
      if constexpr (arg_info.arg_type == ArgInfo::IN_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
          auto reg = reg_alloc();
          Mov<typename TypeTraits<int64_t>::Float>(as_, reg, std::get<arg_info.from>(input_args_));
          return std::tuple{reg};
        } else {
          static_assert(std::is_same_v<Usage, intrinsics::bindings::Use>);
          static_assert(!RegisterClass::kIsImplicitReg);
          return std::tuple{std::get<arg_info.from>(input_args_)};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_OUT_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
        static_assert(!RegisterClass::kIsImplicitReg);
        if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
          static_assert(std::is_integral_v<
                        std::tuple_element_t<arg_info.to, typename AsmCallInfo::OutputArguments>>);
          CHECK_EQ(result_xmm_reg_, x86_64::Assembler::no_xmm_register);
          result_xmm_reg_ = reg_alloc();
          Mov<typename TypeTraits<int64_t>::Float>(
              as_, result_xmm_reg_, std::get<arg_info.from>(input_args_));
          return std::tuple{result_xmm_reg_};
        } else {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, result_, std::get<arg_info.from>(input_args_));
          return std::tuple{result_};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_TMP_ARG) {
        if constexpr (RegisterClass::kAsRegister == 'c') {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, as_.rcx, std::get<arg_info.from>(input_args_));
          return std::tuple{};
        } else if constexpr (RegisterClass::kAsRegister == 'a') {
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, as_.rax, std::get<arg_info.from>(input_args_));
          return std::tuple{};
        } else {
          static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
          static_assert(!RegisterClass::kIsImplicitReg);
          auto reg = reg_alloc();
          Mov<std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>>(
              as_, reg, std::get<arg_info.from>(input_args_));
          return std::tuple{reg};
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::IN_OUT_TMP_ARG) {
        using Type = std::tuple_element_t<arg_info.from, typename AsmCallInfo::InputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::UseDef>);
        static_assert(RegisterClass::kIsImplicitReg);
        if constexpr (RegisterClass::kAsRegister == 'a') {
          CHECK_EQ(result_reg_, x86_64::Assembler::no_register);
          Mov<Type>(as_, as_.rax, std::get<arg_info.from>(input_args_));
          result_reg_ = as_.rax;
          return std::tuple{};
        } else {
          static_assert(kDependentValueFalse<arg_info.arg_type>);
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::OUT_ARG) {
        using Type = std::tuple_element_t<arg_info.to, typename AsmCallInfo::OutputArguments>;
        static_assert(std::is_same_v<Usage, intrinsics::bindings::Def> ||
                      std::is_same_v<Usage, intrinsics::bindings::DefEarlyClobber>);
        if constexpr (RegisterClass::kAsRegister == 'a') {
          CHECK_EQ(result_reg_, x86_64::Assembler::no_register);
          result_reg_ = as_.rax;
          return std::tuple{};
        } else if constexpr (RegisterClass::kAsRegister == 'c') {
          CHECK_EQ(result_reg_, x86_64::Assembler::no_register);
          result_reg_ = as_.rcx;
          return std::tuple{};
        } else {
          static_assert(!RegisterClass::kIsImplicitReg);
          if constexpr (RegisterClass::kAsRegister == 'x' && std::is_integral_v<Type>) {
            CHECK_EQ(result_xmm_reg_, x86_64::Assembler::no_xmm_register);
            result_xmm_reg_ = reg_alloc();
            return std::tuple{result_xmm_reg_};
          } else {
            return std::tuple{result_};
          }
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::OUT_TMP_ARG) {
        if constexpr (RegisterClass::kAsRegister == 'd') {
          result_reg_ = as_.rdx;
          return std::tuple{};
        } else {
          static_assert(kDependentValueFalse<arg_info.arg_type>);
        }
      } else if constexpr (arg_info.arg_type == ArgInfo::TMP_ARG) {
        static_assert(std::is_same_v<Usage, intrinsics::bindings::Def> ||
                      std::is_same_v<Usage, intrinsics::bindings::DefEarlyClobber>);
        if constexpr (RegisterClass::kAsRegister == 'm') {
          if (scratch_arg_ >= config::kScratchAreaSize / config::kScratchAreaSlotSize) {
            FATAL("Only two scratch registers are supported for now");
          }
          return std::tuple{x86_64::Assembler::Operand{
              .base = as_.rbp,
              .disp = static_cast<int>(offsetof(ThreadState, intrinsics_scratch_area) +
                                       config::kScratchAreaSlotSize * scratch_arg_++)}};
        } else if constexpr (RegisterClass::kIsImplicitReg) {
          return std::tuple{};
        } else {
          return std::tuple{reg_alloc()};
        }
      } else {
        static_assert(kDependentValueFalse<arg_info.arg_type>);
      }
    }
  }

 private:
  MacroAssembler<x86_64::Assembler>& as_;
  RegAlloc& reg_alloc_;
  SIMDRegAlloc& simd_reg_alloc_;
  AssemblerResType result_;
  x86_64::Assembler::Register result_reg_ = x86_64::Assembler::no_register;
  x86_64::Assembler::XMMRegister result_xmm_reg_ = x86_64::Assembler::no_xmm_register;
  std::tuple<AssemblerArgType...> input_args_;
  uint32_t scratch_arg_ = 0;
  bool success_;
};

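// Entry point: first try the host-rounding special cases, then fall back to the generic
// binding-based expansion.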
template <auto kFunction,
          typename RegAlloc,
          typename SIMDRegAlloc,
          typename AssemblerResType,
          typename... AssemblerArgType>
bool TryInlineIntrinsic(MacroAssembler<x86_64::Assembler>& as,
                        RegAlloc&& reg_alloc,
                        SIMDRegAlloc&& simd_reg_alloc,
                        AssemblerResType result,
                        AssemblerArgType... args) {
  if (InlineIntrinsic<kFunction>::TryInlineWithHostRounding(
          as, reg_alloc, simd_reg_alloc, result, args...)) {
    return true;
  }

  return TryBindingBasedInlineIntrinsic<kFunction,
                                        RegAlloc,
                                        SIMDRegAlloc,
                                        AssemblerResType,
                                        AssemblerArgType...>(
      as, reg_alloc, simd_reg_alloc, result, args...);
}
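
// A minimal sketch of a call site (hypothetical; the allocator lambdas, register names, and
// fallback helper are assumptions for illustration, not part of this header). The allocators
// are callables returning a fresh temporary register, matching the reg_alloc() calls above:
//
//   auto gp_alloc = [&]() { return alloc.AllocTempReg(); };
//   auto simd_alloc = [&]() { return alloc.AllocTempSimdReg(); };
//   if (!TryInlineIntrinsic<&intrinsics::FAdd<intrinsics::Float64>>(
//           as, gp_alloc, simd_alloc, result_reg, rm, frm, src1, src2)) {
//     // Fall back to the out-of-line intrinsic call path (name assumed).
//     CallIntrinsic(...);
//   }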

}  // namespace berberis::inline_intrinsic

#endif  // BERBERIS_LITE_TRANSLATOR_RISCV64_TO_X86_64_INLINE_INTRINSIC_H_