// Copyright (c) 2017 The Khronos Group Inc.
// Copyright (c) 2017 Valve Corporation
// Copyright (c) 2017 LunarG Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "source/opt/inline_pass.h"

#include <unordered_set>
#include <utility>

#include "source/cfa.h"
#include "source/opt/reflect.h"
#include "source/util/make_unique.h"

namespace spvtools {
namespace opt {
namespace {
// Indices of operands in SPIR-V instructions.
constexpr int kSpvFunctionCallFunctionId = 2;
constexpr int kSpvFunctionCallArgumentId = 3;
constexpr int kSpvReturnValueId = 0;
}  // namespace

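// Creates an OpTypePointer to |type_id| with the given storage class, adds it
// to the module, and returns its result id, or 0 if ids are exhausted.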
uint32_t InlinePass::AddPointerToType(uint32_t type_id,
                                      spv::StorageClass storage_class) {
  uint32_t resultId = context()->TakeNextId();
  if (resultId == 0) {
    return resultId;
  }

  std::unique_ptr<Instruction> type_inst(
      new Instruction(context(), spv::Op::OpTypePointer, 0, resultId,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_STORAGE_CLASS,
                        {uint32_t(storage_class)}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {type_id}}}));
  context()->AddType(std::move(type_inst));
  analysis::Type* pointeeTy;
  std::unique_ptr<analysis::Pointer> pointerTy;
  std::tie(pointeeTy, pointerTy) =
      context()->get_type_mgr()->GetTypeAndPointerType(
          type_id, spv::StorageClass::Function);
  context()->get_type_mgr()->RegisterType(resultId, *pointerTy);
  return resultId;
}

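// Appends an unconditional OpBranch to |label_id| at the end of |block_ptr|.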
void InlinePass::AddBranch(uint32_t label_id,
                           std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newBranch(
      new Instruction(context(), spv::Op::OpBranch, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {label_id}}}));
  (*block_ptr)->AddInstruction(std::move(newBranch));
}

void InlinePass::AddBranchCond(uint32_t cond_id, uint32_t true_id,
                               uint32_t false_id,
                               std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newBranch(
      new Instruction(context(), spv::Op::OpBranchConditional, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {cond_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {true_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {false_id}}}));
  (*block_ptr)->AddInstruction(std::move(newBranch));
}

void InlinePass::AddLoopMerge(uint32_t merge_id, uint32_t continue_id,
                              std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newLoopMerge(new Instruction(
      context(), spv::Op::OpLoopMerge, 0, 0,
      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {merge_id}},
       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {continue_id}},
       {spv_operand_type_t::SPV_OPERAND_TYPE_LOOP_CONTROL, {0}}}));
  (*block_ptr)->AddInstruction(std::move(newLoopMerge));
}

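// Appends an OpStore of |val_id| to |ptr_id| at the end of |block_ptr|,
// attaching the given debug line instruction and scope when present.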
void InlinePass::AddStore(uint32_t ptr_id, uint32_t val_id,
                          std::unique_ptr<BasicBlock>* block_ptr,
                          const Instruction* line_inst,
                          const DebugScope& dbg_scope) {
  std::unique_ptr<Instruction> newStore(
      new Instruction(context(), spv::Op::OpStore, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {ptr_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {val_id}}}));
  if (line_inst != nullptr) {
    newStore->AddDebugLine(line_inst);
  }
  newStore->SetDebugScope(dbg_scope);
  (*block_ptr)->AddInstruction(std::move(newStore));
}

void InlinePass::AddLoad(uint32_t type_id, uint32_t resultId, uint32_t ptr_id,
                         std::unique_ptr<BasicBlock>* block_ptr,
                         const Instruction* line_inst,
                         const DebugScope& dbg_scope) {
  std::unique_ptr<Instruction> newLoad(
      new Instruction(context(), spv::Op::OpLoad, type_id, resultId,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {ptr_id}}}));
  if (line_inst != nullptr) {
    newLoad->AddDebugLine(line_inst);
  }
  newLoad->SetDebugScope(dbg_scope);
  (*block_ptr)->AddInstruction(std::move(newLoad));
}

std::unique_ptr<Instruction> InlinePass::NewLabel(uint32_t label_id) {
  std::unique_ptr<Instruction> newLabel(
      new Instruction(context(), spv::Op::OpLabel, 0, label_id, {}));
  return newLabel;
}

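// Returns the id of an OpConstantFalse constant, creating it (and the
// OpTypeBool type, if missing) on first use and caching it in |false_id_|.
// Returns 0 if ids are exhausted.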
uint32_t InlinePass::GetFalseId() {
  if (false_id_ != 0) return false_id_;
  false_id_ = get_module()->GetGlobalValue(spv::Op::OpConstantFalse);
  if (false_id_ != 0) return false_id_;
  uint32_t boolId = get_module()->GetGlobalValue(spv::Op::OpTypeBool);
  if (boolId == 0) {
    boolId = context()->TakeNextId();
    if (boolId == 0) {
      return 0;
    }
    get_module()->AddGlobalValue(spv::Op::OpTypeBool, boolId, 0);
  }
  false_id_ = context()->TakeNextId();
  if (false_id_ == 0) {
    return 0;
  }
  get_module()->AddGlobalValue(spv::Op::OpConstantFalse, false_id_, boolId);
  return false_id_;
}

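// Records, in |callee2caller|, the mapping from each formal parameter of
// |calleeFn| to the corresponding actual argument of the call at
// |call_inst_itr|.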
void InlinePass::MapParams(
    Function* calleeFn, BasicBlock::iterator call_inst_itr,
    std::unordered_map<uint32_t, uint32_t>* callee2caller) {
  int param_idx = 0;
  calleeFn->ForEachParam(
      [&call_inst_itr, &param_idx, &callee2caller](const Instruction* cpi) {
        const uint32_t pid = cpi->result_id();
        (*callee2caller)[pid] = call_inst_itr->GetSingleWordOperand(
            kSpvFunctionCallArgumentId + param_idx);
        ++param_idx;
      });
}

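// Clones the OpVariable instructions at the top of the callee's entry block
// so they become caller locals, recording each id mapping in |callee2caller|
// and the clones in |new_vars|. Returns false if ids are exhausted.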
bool InlinePass::CloneAndMapLocals(
    Function* calleeFn, std::vector<std::unique_ptr<Instruction>>* new_vars,
    std::unordered_map<uint32_t, uint32_t>* callee2caller,
    analysis::DebugInlinedAtContext* inlined_at_ctx) {
  auto callee_block_itr = calleeFn->begin();
  auto callee_var_itr = callee_block_itr->begin();
  while (callee_var_itr->opcode() == spv::Op::OpVariable ||
         callee_var_itr->GetCommonDebugOpcode() ==
             CommonDebugInfoDebugDeclare) {
    if (callee_var_itr->opcode() != spv::Op::OpVariable) {
      ++callee_var_itr;
      continue;
    }

    std::unique_ptr<Instruction> var_inst(callee_var_itr->Clone(context()));
    uint32_t newId = context()->TakeNextId();
    if (newId == 0) {
      return false;
    }
    get_decoration_mgr()->CloneDecorations(callee_var_itr->result_id(), newId);
    var_inst->SetResultId(newId);
    var_inst->UpdateDebugInlinedAt(
        context()->get_debug_info_mgr()->BuildDebugInlinedAtChain(
            callee_var_itr->GetDebugInlinedAt(), inlined_at_ctx));
    (*callee2caller)[callee_var_itr->result_id()] = newId;
    new_vars->push_back(std::move(var_inst));
    ++callee_var_itr;
  }
  return true;
}

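// Creates a Function-storage-class variable in the caller to receive the
// callee's return value, appends it to |new_vars|, and returns its id, or 0
// if ids are exhausted. The variable inherits the callee's decorations, plus
// AliasedPointer when it holds a PhysicalStorageBuffer pointer.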
uint32_t InlinePass::CreateReturnVar(
    Function* calleeFn, std::vector<std::unique_ptr<Instruction>>* new_vars) {
  uint32_t returnVarId = 0;
  const uint32_t calleeTypeId = calleeFn->type_id();
  analysis::TypeManager* type_mgr = context()->get_type_mgr();
  assert(type_mgr->GetType(calleeTypeId)->AsVoid() == nullptr &&
         "Cannot create a return variable of type void.");
  // Find or create ptr to callee return type.
  uint32_t returnVarTypeId =
      type_mgr->FindPointerToType(calleeTypeId, spv::StorageClass::Function);

  if (returnVarTypeId == 0) {
    returnVarTypeId =
        AddPointerToType(calleeTypeId, spv::StorageClass::Function);
    if (returnVarTypeId == 0) {
      return 0;
    }
  }

  // Add return var to new function scope variables.
  returnVarId = context()->TakeNextId();
  if (returnVarId == 0) {
    return 0;
  }

  std::unique_ptr<Instruction> var_inst(new Instruction(
      context(), spv::Op::OpVariable, returnVarTypeId, returnVarId,
      {{spv_operand_type_t::SPV_OPERAND_TYPE_STORAGE_CLASS,
        {(uint32_t)spv::StorageClass::Function}}}));
  new_vars->push_back(std::move(var_inst));
  get_decoration_mgr()->CloneDecorations(calleeFn->result_id(), returnVarId);

  // Decorate the return var with AliasedPointer if the storage class of the
  // pointee type is PhysicalStorageBuffer.
  auto const pointee_type =
      type_mgr->GetType(returnVarTypeId)->AsPointer()->pointee_type();
  if (pointee_type->AsPointer() != nullptr) {
    if (pointee_type->AsPointer()->storage_class() ==
        spv::StorageClass::PhysicalStorageBuffer) {
      get_decoration_mgr()->AddDecoration(
          returnVarId, uint32_t(spv::Decoration::AliasedPointer));
    }
  }

  return returnVarId;
}

bool InlinePass::IsSameBlockOp(const Instruction* inst) const {
  return inst->opcode() == spv::Op::OpSampledImage ||
         inst->opcode() == spv::Op::OpImage;
}

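// Remaps the operands of |inst| that refer to "same-block" instructions (see
// IsSameBlockOp). SPIR-V requires the result of OpSampledImage or OpImage to
// be consumed in the same basic block that defines it, so when inlining
// splits the caller block, any such definition made before the call must be
// regenerated (recursively) in the block that now holds the use. A rough
// sketch with illustrative ids:
//
//   %si = OpSampledImage %ty %img %samp     ; defined before the call
//   ...                                     ; inlined callee blocks
//   %si2 = OpSampledImage %ty %img %samp    ; clone regenerated in last block
//   %texel = OpImageSampleImplicitLod %v4 %si2 %coord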
bool InlinePass::CloneSameBlockOps(
    std::unique_ptr<Instruction>* inst,
    std::unordered_map<uint32_t, uint32_t>* postCallSB,
    std::unordered_map<uint32_t, Instruction*>* preCallSB,
    std::unique_ptr<BasicBlock>* block_ptr) {
  return (*inst)->WhileEachInId([&postCallSB, &preCallSB, &block_ptr,
                                 this](uint32_t* iid) {
    const auto mapItr = (*postCallSB).find(*iid);
    if (mapItr == (*postCallSB).end()) {
      const auto mapItr2 = (*preCallSB).find(*iid);
      if (mapItr2 != (*preCallSB).end()) {
        // Clone pre-call same-block ops, map result id.
        const Instruction* inInst = mapItr2->second;
        std::unique_ptr<Instruction> sb_inst(inInst->Clone(context()));
        if (!CloneSameBlockOps(&sb_inst, postCallSB, preCallSB, block_ptr)) {
          return false;
        }

        const uint32_t rid = sb_inst->result_id();
        const uint32_t nid = context()->TakeNextId();
        if (nid == 0) {
          return false;
        }
        get_decoration_mgr()->CloneDecorations(rid, nid);
        sb_inst->SetResultId(nid);
        (*postCallSB)[rid] = nid;
        *iid = nid;
        (*block_ptr)->AddInstruction(std::move(sb_inst));
      }
    } else {
      // Reset same-block op operand.
      *iid = mapItr->second;
    }
    return true;
  });
}

void InlinePass::MoveInstsBeforeEntryBlock(
    std::unordered_map<uint32_t, Instruction*>* preCallSB,
    BasicBlock* new_blk_ptr, BasicBlock::iterator call_inst_itr,
    UptrVectorIterator<BasicBlock> call_block_itr) {
  for (auto cii = call_block_itr->begin(); cii != call_inst_itr;
       cii = call_block_itr->begin()) {
    Instruction* inst = &*cii;
    inst->RemoveFromList();
    std::unique_ptr<Instruction> cp_inst(inst);
    // Remember same-block ops for possible regeneration.
    if (IsSameBlockOp(&*cp_inst)) {
      auto* sb_inst_ptr = cp_inst.get();
      (*preCallSB)[cp_inst->result_id()] = sb_inst_ptr;
    }
    new_blk_ptr->AddInstruction(std::move(cp_inst));
  }
}

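// Terminates |new_blk_ptr| with a branch to a new guard block and returns the
// guard block, or nullptr if ids are exhausted.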
std::unique_ptr<BasicBlock> InlinePass::AddGuardBlock(
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
    std::unordered_map<uint32_t, uint32_t>* callee2caller,
    std::unique_ptr<BasicBlock> new_blk_ptr, uint32_t entry_blk_label_id) {
  const auto guard_block_id = context()->TakeNextId();
  if (guard_block_id == 0) {
    return nullptr;
  }
  AddBranch(guard_block_id, &new_blk_ptr);
  new_blocks->push_back(std::move(new_blk_ptr));
  // Start the next block.
  new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(guard_block_id));
  // Reset the mapping of the callee's entry block to point to
  // the guard block. Do this so we can fix up phis later on to
  // satisfy dominance.
  (*callee2caller)[entry_blk_label_id] = guard_block_id;
  return new_blk_ptr;
}

InstructionList::iterator InlinePass::AddStoresForVariableInitializers(
    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
    analysis::DebugInlinedAtContext* inlined_at_ctx,
    std::unique_ptr<BasicBlock>* new_blk_ptr,
    UptrVectorIterator<BasicBlock> callee_first_block_itr) {
  auto callee_itr = callee_first_block_itr->begin();
  while (callee_itr->opcode() == spv::Op::OpVariable ||
         callee_itr->GetCommonDebugOpcode() == CommonDebugInfoDebugDeclare) {
    if (callee_itr->opcode() == spv::Op::OpVariable &&
        callee_itr->NumInOperands() == 2) {
      assert(callee2caller.count(callee_itr->result_id()) &&
             "Expected the variable to have already been mapped.");
      uint32_t new_var_id = callee2caller.at(callee_itr->result_id());

      // The initializer must be a constant or global value, so no mapped
      // callee id should be used.
      uint32_t val_id = callee_itr->GetSingleWordInOperand(1);
      AddStore(new_var_id, val_id, new_blk_ptr, callee_itr->dbg_line_inst(),
               context()->get_debug_info_mgr()->BuildDebugScope(
                   callee_itr->GetDebugScope(), inlined_at_ctx));
    }
    if (callee_itr->GetCommonDebugOpcode() == CommonDebugInfoDebugDeclare) {
      InlineSingleInstruction(
          callee2caller, new_blk_ptr->get(), &*callee_itr,
          context()->get_debug_info_mgr()->BuildDebugInlinedAtChain(
              callee_itr->GetDebugScope().GetInlinedAt(), inlined_at_ctx));
    }
    ++callee_itr;
  }
  return callee_itr;
}

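// Copies |inst| into |new_blk_ptr|, remapping its input ids and result id
// through |callee2caller| and setting its DebugInlinedAt to |dbg_inlined_at|.
// Returns false if a result id has no mapping. Returns (OpReturn and
// OpReturnValue) are skipped here; InlineReturn handles them.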
bool InlinePass::InlineSingleInstruction(
    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
    BasicBlock* new_blk_ptr, const Instruction* inst, uint32_t dbg_inlined_at) {
  // If we have a return, it must be at the end of the callee. We will handle
  // it at the end.
  if (inst->opcode() == spv::Op::OpReturnValue ||
      inst->opcode() == spv::Op::OpReturn)
    return true;

  // Copy callee instruction and remap all input Ids.
  std::unique_ptr<Instruction> cp_inst(inst->Clone(context()));
  cp_inst->ForEachInId([&callee2caller](uint32_t* iid) {
    const auto mapItr = callee2caller.find(*iid);
    if (mapItr != callee2caller.end()) {
      *iid = mapItr->second;
    }
  });

  // If result id is non-zero, remap it.
  const uint32_t rid = cp_inst->result_id();
  if (rid != 0) {
    const auto mapItr = callee2caller.find(rid);
    if (mapItr == callee2caller.end()) {
      return false;
    }
    uint32_t nid = mapItr->second;
    cp_inst->SetResultId(nid);
    get_decoration_mgr()->CloneDecorations(rid, nid);
  }

  cp_inst->UpdateDebugInlinedAt(dbg_inlined_at);
  new_blk_ptr->AddInstruction(std::move(cp_inst));
  return true;
}

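// Stores the return value (if any) of the callee's terminating instruction
// |inst| to |returnVarId|. If the callee contains an abort instruction
// (e.g. OpKill), a fresh return-label block is created and returned so the
// return can branch to it; otherwise |new_blk_ptr| is returned unchanged.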
std::unique_ptr<BasicBlock> InlinePass::InlineReturn(
    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
    std::unique_ptr<BasicBlock> new_blk_ptr,
    analysis::DebugInlinedAtContext* inlined_at_ctx, Function* calleeFn,
    const Instruction* inst, uint32_t returnVarId) {
  // Store return value to return variable.
  if (inst->opcode() == spv::Op::OpReturnValue) {
    assert(returnVarId != 0);
    uint32_t valId = inst->GetInOperand(kSpvReturnValueId).words[0];
    const auto mapItr = callee2caller.find(valId);
    if (mapItr != callee2caller.end()) {
      valId = mapItr->second;
    }
    AddStore(returnVarId, valId, &new_blk_ptr, inst->dbg_line_inst(),
             context()->get_debug_info_mgr()->BuildDebugScope(
                 inst->GetDebugScope(), inlined_at_ctx));
  }

  uint32_t returnLabelId = 0;
  for (auto callee_block_itr = calleeFn->begin();
       callee_block_itr != calleeFn->end(); ++callee_block_itr) {
    if (spvOpcodeIsAbort(callee_block_itr->tail()->opcode())) {
      returnLabelId = context()->TakeNextId();
      break;
    }
  }
  if (returnLabelId == 0) return new_blk_ptr;

  if (inst->opcode() == spv::Op::OpReturn ||
      inst->opcode() == spv::Op::OpReturnValue)
    AddBranch(returnLabelId, &new_blk_ptr);
  new_blocks->push_back(std::move(new_blk_ptr));
  return MakeUnique<BasicBlock>(NewLabel(returnLabelId));
}

bool InlinePass::InlineEntryBlock(
    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
    std::unique_ptr<BasicBlock>* new_blk_ptr,
    UptrVectorIterator<BasicBlock> callee_first_block,
    analysis::DebugInlinedAtContext* inlined_at_ctx) {
  auto callee_inst_itr = AddStoresForVariableInitializers(
      callee2caller, inlined_at_ctx, new_blk_ptr, callee_first_block);

  while (callee_inst_itr != callee_first_block->end()) {
    // Don't inline function definition links; the calling function is not a
    // definition.
    if (callee_inst_itr->GetShader100DebugOpcode() ==
        NonSemanticShaderDebugInfo100DebugFunctionDefinition) {
      ++callee_inst_itr;
      continue;
    }

    if (!InlineSingleInstruction(
            callee2caller, new_blk_ptr->get(), &*callee_inst_itr,
            context()->get_debug_info_mgr()->BuildDebugInlinedAtChain(
                callee_inst_itr->GetDebugScope().GetInlinedAt(),
                inlined_at_ctx))) {
      return false;
    }
    ++callee_inst_itr;
  }
  return true;
}

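// Inlines every callee block after the entry block, one new caller block per
// callee block, using the label ids already reserved in |callee2caller|.
// Returns the last (still open) block, or nullptr on failure.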
std::unique_ptr<BasicBlock> InlinePass::InlineBasicBlocks(
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
    std::unique_ptr<BasicBlock> new_blk_ptr,
    analysis::DebugInlinedAtContext* inlined_at_ctx, Function* calleeFn) {
  auto callee_block_itr = calleeFn->begin();
  ++callee_block_itr;

  while (callee_block_itr != calleeFn->end()) {
    new_blocks->push_back(std::move(new_blk_ptr));
    const auto mapItr =
        callee2caller.find(callee_block_itr->GetLabelInst()->result_id());
    if (mapItr == callee2caller.end()) return nullptr;
    new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(mapItr->second));

    auto tail_inst_itr = callee_block_itr->end();
    for (auto inst_itr = callee_block_itr->begin(); inst_itr != tail_inst_itr;
         ++inst_itr) {
      // Don't inline function definition links; the calling function is not a
      // definition.
      if (inst_itr->GetShader100DebugOpcode() ==
          NonSemanticShaderDebugInfo100DebugFunctionDefinition)
        continue;
      if (!InlineSingleInstruction(
              callee2caller, new_blk_ptr.get(), &*inst_itr,
              context()->get_debug_info_mgr()->BuildDebugInlinedAtChain(
                  inst_itr->GetDebugScope().GetInlinedAt(), inlined_at_ctx))) {
        return nullptr;
      }
    }

    ++callee_block_itr;
  }
  return new_blk_ptr;
}

bool InlinePass::MoveCallerInstsAfterFunctionCall(
    std::unordered_map<uint32_t, Instruction*>* preCallSB,
    std::unordered_map<uint32_t, uint32_t>* postCallSB,
    std::unique_ptr<BasicBlock>* new_blk_ptr,
    BasicBlock::iterator call_inst_itr, bool multiBlocks) {
  // Copy remaining instructions from caller block.
  for (Instruction* inst = call_inst_itr->NextNode(); inst;
       inst = call_inst_itr->NextNode()) {
    inst->RemoveFromList();
    std::unique_ptr<Instruction> cp_inst(inst);
    // If multiple blocks were generated, regenerate any same-block
    // instruction that has not been seen in this last block.
    if (multiBlocks) {
      if (!CloneSameBlockOps(&cp_inst, postCallSB, preCallSB, new_blk_ptr)) {
        return false;
      }

      // Remember same-block ops in this block.
      if (IsSameBlockOp(&*cp_inst)) {
        const uint32_t rid = cp_inst->result_id();
        (*postCallSB)[rid] = rid;
      }
    }
    new_blk_ptr->get()->AddInstruction(std::move(cp_inst));
  }

  return true;
}

void InlinePass::MoveLoopMergeInstToFirstBlock(
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
  // Move the OpLoopMerge from the last block back to the first, where
  // it belongs.
  auto& first = new_blocks->front();
  auto& last = new_blocks->back();
  assert(first != last);

  // Insert a modified copy of the loop merge into the first block.
  auto loop_merge_itr = last->tail();
  --loop_merge_itr;
  assert(loop_merge_itr->opcode() == spv::Op::OpLoopMerge);
  std::unique_ptr<Instruction> cp_inst(loop_merge_itr->Clone(context()));
  first->tail().InsertBefore(std::move(cp_inst));

  // Remove the loop merge from the last block.
  loop_merge_itr->RemoveFromList();
  delete &*loop_merge_itr;
}

void InlinePass::UpdateSingleBlockLoopContinueTarget(
    uint32_t new_id, std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
  auto& header = new_blocks->front();
  auto* merge_inst = header->GetLoopMergeInst();

  // The back-edge block is split at the branch to create a new back-edge
  // block. The old block is modified to branch to the new block. The loop
  // merge instruction is updated to declare the new block as the continue
  // target. This has the effect of changing the loop from being a large
  // continue construct and an empty loop construct to being a loop with a loop
  // construct and a trivial continue construct. This change is made to satisfy
  // structural dominance.

  // Add the new basic block.
  std::unique_ptr<BasicBlock> new_block =
      MakeUnique<BasicBlock>(NewLabel(new_id));
  auto& old_backedge = new_blocks->back();
  auto old_branch = old_backedge->tail();

  // Move the old back edge into the new block.
  std::unique_ptr<Instruction> br(&*old_branch);
  new_block->AddInstruction(std::move(br));

  // Add a branch to the new block from the old back-edge block.
  AddBranch(new_id, &old_backedge);
  new_blocks->push_back(std::move(new_block));

  // Update the loop's continue target to the new block.
  merge_inst->SetInOperand(1u, {new_id});
}

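// Inlines the function call at |call_inst_itr| (in the block at
// |call_block_itr|), appending the replacement blocks to |new_blocks| and the
// new caller locals to |new_vars|. As a rough sketch (illustrative ids), a
// caller block
//
//   %blk = OpLabel
//   ...pre-call instructions...
//   %r = OpFunctionCall %ty %f %arg
//   ...post-call instructions...
//
// becomes
//
//   %blk = OpLabel
//   ...pre-call instructions...
//   ...inlined callee blocks; OpReturnValue %v becomes OpStore %ret_var %v...
//   %r = OpLoad %ty %ret_var
//   ...post-call instructions...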
bool InlinePass::GenInlineCode(
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
    std::vector<std::unique_ptr<Instruction>>* new_vars,
    BasicBlock::iterator call_inst_itr,
    UptrVectorIterator<BasicBlock> call_block_itr) {
  // Map from all ids in the callee to their equivalent id in the caller
  // as callee instructions are copied into caller.
  std::unordered_map<uint32_t, uint32_t> callee2caller;
  // Pre-call same-block insts
  std::unordered_map<uint32_t, Instruction*> preCallSB;
  // Post-call same-block op ids
  std::unordered_map<uint32_t, uint32_t> postCallSB;

  analysis::DebugInlinedAtContext inlined_at_ctx(&*call_inst_itr);

  // Invalidate the def-use chains. They are not kept up to date while
  // inlining. However, certain calls try to keep them up-to-date if they are
  // valid. These operations can fail.
  context()->InvalidateAnalyses(IRContext::kAnalysisDefUse);

  // If the caller is a loop header and the callee has multiple blocks, then
  // the normal inlining logic will place the OpLoopMerge in the last of
  // several blocks in the loop. Instead, it should be placed at the end of
  // the first block. We'll wait to move the OpLoopMerge until the end of the
  // regular inlining logic, and only if necessary.
  bool caller_is_loop_header = call_block_itr->GetLoopMergeInst() != nullptr;

  // Single-trip loop continue block
  std::unique_ptr<BasicBlock> single_trip_loop_cont_blk;

  Function* calleeFn = id2function_[call_inst_itr->GetSingleWordOperand(
      kSpvFunctionCallFunctionId)];

  // Map parameters to actual arguments.
  MapParams(calleeFn, call_inst_itr, &callee2caller);

  // Define caller local variables for all callee variables and create map to
  // them.
  if (!CloneAndMapLocals(calleeFn, new_vars, &callee2caller, &inlined_at_ctx)) {
    return false;
  }

  // The first block needs to use the label of the original block, but map the
  // callee's entry block label to it in case of a phi reference.
  uint32_t entry_blk_label_id = calleeFn->begin()->GetLabelInst()->result_id();
  callee2caller[entry_blk_label_id] = call_block_itr->id();
  std::unique_ptr<BasicBlock> new_blk_ptr =
      MakeUnique<BasicBlock>(NewLabel(call_block_itr->id()));

  // Move instructions of original caller block up to call instruction.
  MoveInstsBeforeEntryBlock(&preCallSB, new_blk_ptr.get(), call_inst_itr,
                            call_block_itr);

  if (caller_is_loop_header &&
      (*(calleeFn->begin())).GetMergeInst() != nullptr) {
    // We can't place both the caller's merge instruction and
    // another merge instruction in the same block. So split the
    // calling block. Insert an unconditional branch to a new guard
    // block. Later, once we know the ID of the last block, we
    // will move the caller's OpLoopMerge from the last generated
    // block into the first block. We also wait to avoid
    // invalidating various iterators.
    new_blk_ptr = AddGuardBlock(new_blocks, &callee2caller,
                                std::move(new_blk_ptr), entry_blk_label_id);
    if (new_blk_ptr == nullptr) return false;
  }

  // Create return var if needed.
  const uint32_t calleeTypeId = calleeFn->type_id();
  uint32_t returnVarId = 0;
  analysis::Type* calleeType = context()->get_type_mgr()->GetType(calleeTypeId);
  if (calleeType->AsVoid() == nullptr) {
    returnVarId = CreateReturnVar(calleeFn, new_vars);
    if (returnVarId == 0) {
      return false;
    }
  }

  calleeFn->WhileEachInst([&callee2caller, this](const Instruction* cpi) {
    // Create a set of callee result ids. Used to detect forward references.
    const uint32_t rid = cpi->result_id();
    if (rid != 0 && callee2caller.find(rid) == callee2caller.end()) {
      const uint32_t nid = context()->TakeNextId();
      if (nid == 0) return false;
      callee2caller[rid] = nid;
    }
    return true;
  });

  // Inline DebugDeclare instructions in the callee's header.
  calleeFn->ForEachDebugInstructionsInHeader(
      [&new_blk_ptr, &callee2caller, &inlined_at_ctx, this](Instruction* inst) {
        InlineSingleInstruction(
            callee2caller, new_blk_ptr.get(), inst,
            context()->get_debug_info_mgr()->BuildDebugInlinedAtChain(
                inst->GetDebugScope().GetInlinedAt(), &inlined_at_ctx));
      });

  // Inline the entry block of the callee function.
  if (!InlineEntryBlock(callee2caller, &new_blk_ptr, calleeFn->begin(),
                        &inlined_at_ctx)) {
    return false;
  }

  // Inline blocks of the callee function other than the entry block.
  new_blk_ptr =
      InlineBasicBlocks(new_blocks, callee2caller, std::move(new_blk_ptr),
                        &inlined_at_ctx, calleeFn);
  if (new_blk_ptr == nullptr) return false;

  new_blk_ptr = InlineReturn(callee2caller, new_blocks, std::move(new_blk_ptr),
                             &inlined_at_ctx, calleeFn,
                             &*(calleeFn->tail()->tail()), returnVarId);

  // Load return value into result id of call, if it exists.
  if (returnVarId != 0) {
    const uint32_t resId = call_inst_itr->result_id();
    assert(resId != 0);
    AddLoad(calleeTypeId, resId, returnVarId, &new_blk_ptr,
            call_inst_itr->dbg_line_inst(), call_inst_itr->GetDebugScope());
  }

  // Move instructions of original caller block after call instruction.
  if (!MoveCallerInstsAfterFunctionCall(&preCallSB, &postCallSB, &new_blk_ptr,
                                        call_inst_itr,
                                        calleeFn->begin() != calleeFn->end()))
    return false;

  // Finalize inline code.
  new_blocks->push_back(std::move(new_blk_ptr));

  if (caller_is_loop_header && (new_blocks->size() > 1)) {
    MoveLoopMergeInstToFirstBlock(new_blocks);

    // If the loop was previously a single basic block, update its structure.
    auto& header = new_blocks->front();
    auto* merge_inst = header->GetLoopMergeInst();
    if (merge_inst->GetSingleWordInOperand(1u) == header->id()) {
      auto new_id = context()->TakeNextId();
      if (new_id == 0) return false;
      UpdateSingleBlockLoopContinueTarget(new_id, new_blocks);
    }
  }

  // Update block map given replacement blocks.
  for (auto& blk : *new_blocks) {
    id2block_[blk->id()] = &*blk;
  }

  // We need to kill the name and decorations for the call, which will be
  // deleted.
  context()->KillNamesAndDecorates(&*call_inst_itr);

  return true;
}

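// Returns true if |inst| is a call to a function that was judged inlinable by
// InitializeInline. Calls to early-return functions are rejected with a
// warning suggesting that merge-return be run first.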
bool InlinePass::IsInlinableFunctionCall(const Instruction* inst) {
  if (inst->opcode() != spv::Op::OpFunctionCall) return false;
  const uint32_t calleeFnId =
      inst->GetSingleWordOperand(kSpvFunctionCallFunctionId);
  const auto ci = inlinable_.find(calleeFnId);
  if (ci == inlinable_.cend()) return false;

  if (early_return_funcs_.find(calleeFnId) != early_return_funcs_.end()) {
    // We rely on the merge-return pass to handle the early return case
    // in advance.
    std::string message =
        "The function '" + id2function_[calleeFnId]->DefInst().PrettyPrint() +
        "' could not be inlined because the return instruction "
        "is not at the end of the function. This could be fixed by "
        "running merge-return before inlining.";
    consumer()(SPV_MSG_WARNING, "", {0, 0, 0}, message.c_str());
    return false;
  }

  return true;
}

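// Rewrites phi operands in the successors of the last replacement block:
// references to the original (first) block id become references to the last
// block id, since control now reaches those successors from the last block.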
void InlinePass::UpdateSucceedingPhis(
    std::vector<std::unique_ptr<BasicBlock>>& new_blocks) {
  const auto firstBlk = new_blocks.begin();
  const auto lastBlk = new_blocks.end() - 1;
  const uint32_t firstId = (*firstBlk)->id();
  const uint32_t lastId = (*lastBlk)->id();
  const BasicBlock& const_last_block = *lastBlk->get();
  const_last_block.ForEachSuccessorLabel(
      [&firstId, &lastId, this](const uint32_t succ) {
        BasicBlock* sbp = this->id2block_[succ];
        sbp->ForEachPhiInst([&firstId, &lastId](Instruction* phi) {
          phi->ForEachInId([&firstId, &lastId](uint32_t* id) {
            if (*id == firstId) *id = lastId;
          });
        });
      });
}

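// Returns true if |func| has no return instruction inside a loop.
// Non-structured control flow is conservatively treated as having one, since
// the analysis requires structured control flow.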
bool InlinePass::HasNoReturnInLoop(Function* func) {
  // If control flow is not structured, do not do loop/return analysis.
  // TODO: Analyze returns in non-structured control flow.
  if (!context()->get_feature_mgr()->HasCapability(spv::Capability::Shader))
    return false;
  const auto structured_analysis = context()->GetStructuredCFGAnalysis();
  // Search for returns in structured construct.
  bool return_in_loop = false;
  for (auto& blk : *func) {
    auto terminal_ii = blk.cend();
    --terminal_ii;
    if (spvOpcodeIsReturn(terminal_ii->opcode()) &&
        structured_analysis->ContainingLoop(blk.id()) != 0) {
      return_in_loop = true;
      break;
    }
  }
  return !return_in_loop;
}

void InlinePass::AnalyzeReturns(Function* func) {
  // Remember functions that have no return inside a loop.
  if (HasNoReturnInLoop(func)) {
    no_return_in_loop_.insert(func->result_id());
  }
  // Remember functions with a return before their tail basic block.
  for (auto& blk : *func) {
    auto terminal_ii = blk.cend();
    --terminal_ii;
    if (spvOpcodeIsReturn(terminal_ii->opcode()) && &blk != func->tail()) {
      early_return_funcs_.insert(func->result_id());
      break;
    }
  }
}

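// Returns true if |func| can be inlined: it must have blocks, must not be
// marked DontInline, must not be recursive, must have no return inside a
// loop, and must not contain an abort other than OpUnreachable if it is
// called from a continue construct.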
bool InlinePass::IsInlinableFunction(Function* func) {
  // We can only inline a function if it has blocks.
  if (func->cbegin() == func->cend()) return false;

  // Do not inline functions with the DontInline flag.
  if (func->control_mask() & uint32_t(spv::FunctionControlMask::DontInline)) {
    return false;
  }

  // Do not inline functions with returns in loops. Currently early return
  // functions are inlined by wrapping them in a one-trip loop and implementing
  // the returns as a branch to the loop's merge block. However, this can only
  // be done validly if the return was not in a loop in the original function.
  // Also remember functions with multiple (early) returns.
  AnalyzeReturns(func);
  if (no_return_in_loop_.find(func->result_id()) == no_return_in_loop_.cend()) {
    return false;
  }

  if (func->IsRecursive()) {
    return false;
  }

  // Do not inline functions with an abort instruction if they are called from
  // a continue construct. If it is inlined into a continue construct, the
  // backedge will no longer post-dominate the continue target, which is
  // invalid. An `OpUnreachable` is acceptable because it will not change
  // post-dominance if it is statically unreachable.
  bool func_is_called_from_continue =
      funcs_called_from_continue_.count(func->result_id()) != 0;

  if (func_is_called_from_continue && ContainsAbortOtherThanUnreachable(func)) {
    return false;
  }

  return true;
}

bool InlinePass::ContainsAbortOtherThanUnreachable(Function* func) const {
  return !func->WhileEachInst([](Instruction* inst) {
    return inst->opcode() == spv::Op::OpUnreachable ||
           !spvOpcodeIsAbort(inst->opcode());
  });
}

void InlinePass::InitializeInline() {
  false_id_ = 0;

  // Clear collections.
  id2function_.clear();
  id2block_.clear();
  inlinable_.clear();
  no_return_in_loop_.clear();
  early_return_funcs_.clear();
  funcs_called_from_continue_ =
      context()->GetStructuredCFGAnalysis()->FindFuncsCalledFromContinue();

  for (auto& fn : *get_module()) {
    // Initialize function and block maps.
    id2function_[fn.result_id()] = &fn;
    for (auto& blk : fn) {
      id2block_[blk.id()] = &blk;
    }
    // Compute inlinability.
    if (IsInlinableFunction(&fn)) inlinable_.insert(fn.result_id());
  }
}

InlinePass::InlinePass() {}

}  // namespace opt
}  // namespace spvtools