//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
15 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
16 #include "llvm/CodeGen/MachineOperand.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/TargetLowering.h"
19 #include "llvm/IR/Module.h"
20
21 #define DEBUG_TYPE "inline-asm-lowering"
22
23 using namespace llvm;
24
// Out-of-line definition that anchors InlineAsmLowering to this translation
// unit (standard LLVM "anchor" idiom; see the declaration in
// InlineAsmLowering.h).
void InlineAsmLowering::anchor() {}
26
27 namespace {
28
/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  /// Populated by getRegistersForValue(); empty when no register could be
  /// (or needed to be) assigned.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

// One entry per parsed constraint of the inline-asm call being lowered.
using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;
42
43 class ExtraFlags {
44 unsigned Flags = 0;
45
46 public:
ExtraFlags(const CallBase & CB)47 explicit ExtraFlags(const CallBase &CB) {
48 const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
49 if (IA->hasSideEffects())
50 Flags |= InlineAsm::Extra_HasSideEffects;
51 if (IA->isAlignStack())
52 Flags |= InlineAsm::Extra_IsAlignStack;
53 if (CB.isConvergent())
54 Flags |= InlineAsm::Extra_IsConvergent;
55 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
56 }
57
update(const TargetLowering::AsmOperandInfo & OpInfo)58 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
59 // Ideally, we would only check against memory constraints. However, the
60 // meaning of an Other constraint can be target-specific and we can't easily
61 // reason about it. Therefore, be conservative and set MayLoad/MayStore
62 // for Other constraints as well.
63 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
64 OpInfo.ConstraintType == TargetLowering::C_Other) {
65 if (OpInfo.Type == InlineAsm::isInput)
66 Flags |= InlineAsm::Extra_MayLoad;
67 else if (OpInfo.Type == InlineAsm::isOutput)
68 Flags |= InlineAsm::Extra_MayStore;
69 else if (OpInfo.Type == InlineAsm::isClobber)
70 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
71 }
72 }
73
get() const74 unsigned get() const { return Flags; }
75 };
76
77 } // namespace
78
/// Assign virtual/physical registers for the specified register operand.
///
/// \param MF         Function being lowered; provides the subtarget, TLI and
///                   register info. (\p MIRBuilder is currently unused.)
/// \param OpInfo     Operand whose Regs vector is populated on success.
/// \param RefOpInfo  Operand whose constraint code/VT actually selects the
///                   register or register class; differs from \p OpInfo for
///                   matching constraints.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs: one register unless the constraint VT says the value
  // needs to be split across several.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}
135
136 /// Return an integer indicating how general CT is.
getConstraintGenerality(TargetLowering::ConstraintType CT)137 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
138 switch (CT) {
139 case TargetLowering::C_Immediate:
140 case TargetLowering::C_Other:
141 case TargetLowering::C_Unknown:
142 return 0;
143 case TargetLowering::C_Register:
144 return 1;
145 case TargetLowering::C_RegisterClass:
146 return 2;
147 case TargetLowering::C_Memory:
148 case TargetLowering::C_Address:
149 return 3;
150 }
151 llvm_unreachable("Invalid constraint type");
152 }
153
chooseConstraint(TargetLowering::AsmOperandInfo & OpInfo,const TargetLowering * TLI)154 static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
155 const TargetLowering *TLI) {
156 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
157 unsigned BestIdx = 0;
158 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
159 int BestGenerality = -1;
160
161 // Loop over the options, keeping track of the most general one.
162 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
163 TargetLowering::ConstraintType CType =
164 TLI->getConstraintType(OpInfo.Codes[i]);
165
166 // Indirect 'other' or 'immediate' constraints are not allowed.
167 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
168 CType == TargetLowering::C_Register ||
169 CType == TargetLowering::C_RegisterClass))
170 continue;
171
172 // If this is an 'other' or 'immediate' constraint, see if the operand is
173 // valid for it. For example, on X86 we might have an 'rI' constraint. If
174 // the operand is an integer in the range [0..31] we want to use I (saving a
175 // load of a register), otherwise we must use 'r'.
176 if (CType == TargetLowering::C_Other ||
177 CType == TargetLowering::C_Immediate) {
178 assert(OpInfo.Codes[i].size() == 1 &&
179 "Unhandled multi-letter 'other' constraint");
180 // FIXME: prefer immediate constraints if the target allows it
181 }
182
183 // Things with matching constraints can only be registers, per gcc
184 // documentation. This mainly affects "g" constraints.
185 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
186 continue;
187
188 // This constraint letter is more general than the previous one, use it.
189 int Generality = getConstraintGenerality(CType);
190 if (Generality > BestGenerality) {
191 BestType = CType;
192 BestIdx = i;
193 BestGenerality = Generality;
194 }
195 }
196
197 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
198 OpInfo.ConstraintType = BestType;
199 }
200
computeConstraintToUse(const TargetLowering * TLI,TargetLowering::AsmOperandInfo & OpInfo)201 static void computeConstraintToUse(const TargetLowering *TLI,
202 TargetLowering::AsmOperandInfo &OpInfo) {
203 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
204
205 // Single-letter constraints ('r') are very common.
206 if (OpInfo.Codes.size() == 1) {
207 OpInfo.ConstraintCode = OpInfo.Codes[0];
208 OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
209 } else {
210 chooseConstraint(OpInfo, TLI);
211 }
212
213 // 'X' matches anything.
214 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
215 // Labels and constants are handled elsewhere ('X' is the only thing
216 // that matches labels). For Functions, the type here is the type of
217 // the result, which is not what we want to look at; leave them alone.
218 Value *Val = OpInfo.CallOperandVal;
219 if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
220 return;
221
222 // Otherwise, try to resolve it to something we know about by looking at
223 // the actual operand type.
224 if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
225 OpInfo.ConstraintCode = Repl;
226 OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
227 }
228 }
229 }
230
getNumOpRegs(const MachineInstr & I,unsigned OpIdx)231 static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
232 unsigned Flag = I.getOperand(OpIdx).getImm();
233 return InlineAsm::getNumOperandRegisters(Flag);
234 }
235
buildAnyextOrCopy(Register Dst,Register Src,MachineIRBuilder & MIRBuilder)236 static bool buildAnyextOrCopy(Register Dst, Register Src,
237 MachineIRBuilder &MIRBuilder) {
238 const TargetRegisterInfo *TRI =
239 MIRBuilder.getMF().getSubtarget().getRegisterInfo();
240 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
241
242 auto SrcTy = MRI->getType(Src);
243 if (!SrcTy.isValid()) {
244 LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
245 return false;
246 }
247 unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
248 unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);
249
250 if (DstSize < SrcSize) {
251 LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
252 return false;
253 }
254
255 // Attempt to anyext small scalar sources.
256 if (DstSize > SrcSize) {
257 if (!SrcTy.isScalar()) {
258 LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
259 "destination register class\n");
260 return false;
261 }
262 Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
263 }
264
265 MIRBuilder.buildCopy(Dst, Src);
266 return true;
267 }
268
/// Lower the IR inline-asm call \p Call to a MIR INLINEASM instruction.
///
/// \param MIRBuilder       Builder positioned where the INLINEASM and any
///                         supporting copies/extends are emitted.
/// \param Call             Call whose callee is an InlineAsm value.
/// \param GetOrCreateVRegs Callback mapping an IR value to its vreg(s).
/// \return true on success; false when some construct is not supported yet
///         (a debug message names the unsupported case).
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  // First pass: determine the value type and constraint type for every
  // operand, and accumulate the extra-info flags.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      // Struct results distribute one element type per output constraint.
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    // Bail out on i64x8 operands; they are not supported here.
    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  // Second pass: emit flag words and register/immediate operands for each
  // constraint onto the INLINEASM instruction.
  for (auto &OpInfo : ConstraintOperands) {
    // For a matching input constraint, the register/class is dictated by the
    // operand it matches; otherwise the operand stands for itself.
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass. Find a register that we can use.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass);

        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        // Walk the flag words from StartIdx, skipping each group's registers.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  // Propagate source-location metadata for diagnostics.
  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}
661
lowerAsmOperandForConstraint(Value * Val,StringRef Constraint,std::vector<MachineOperand> & Ops,MachineIRBuilder & MIRBuilder) const662 bool InlineAsmLowering::lowerAsmOperandForConstraint(
663 Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
664 MachineIRBuilder &MIRBuilder) const {
665 if (Constraint.size() > 1)
666 return false;
667
668 char ConstraintLetter = Constraint[0];
669 switch (ConstraintLetter) {
670 default:
671 return false;
672 case 'i': // Simple Integer or Relocatable Constant
673 case 'n': // immediate integer with a known value.
674 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
675 assert(CI->getBitWidth() <= 64 &&
676 "expected immediate to fit into 64-bits");
677 // Boolean constants should be zero-extended, others are sign-extended
678 bool IsBool = CI->getBitWidth() == 1;
679 int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
680 Ops.push_back(MachineOperand::CreateImm(ExtVal));
681 return true;
682 }
683 return false;
684 }
685 }
686