//
// Copyright 2023 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ShareGroupVk.cpp:
//    Implements the class methods for ShareGroupVk.
//

#include "libANGLE/renderer/vulkan/ShareGroupVk.h"

#include "common/debug.h"
#include "common/system_utils.h"
#include "libANGLE/Context.h"
#include "libANGLE/Display.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/DeviceVk.h"
#include "libANGLE/renderer/vulkan/ImageVk.h"
#include "libANGLE/renderer/vulkan/SurfaceVk.h"
#include "libANGLE/renderer/vulkan/SyncVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"
#include "libANGLE/renderer/vulkan/VkImageImageSiblingVk.h"
#include "libANGLE/renderer/vulkan/vk_renderer.h"

namespace rx
{

namespace
{
// How often monolithic pipelines should be created, if preferMonolithicPipelinesOverLibraries is
// enabled. Pipeline creation is typically O(hundreds of microseconds). A value of 2ms is chosen
// arbitrarily; it ensures that there is always at most a single pipeline job in progress, while
// maintaining a high throughput of 500 pipelines / second for heavier applications.
constexpr double kMonolithicPipelineJobPeriod = 0.002;

// Time interval in seconds that we should try to prune default buffer pools.
constexpr double kTimeElapsedForPruneDefaultBufferPool = 0.25;

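// Returns true if every context in the share group has the given shared priority. Used to
// assert that context priorities stay unified across the share group.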
bool ValidateIdenticalPriority(const egl::ContextMap &contexts,
                               egl::ContextPriority sharedPriority)
{
    if (sharedPriority == egl::ContextPriority::InvalidEnum)
    {
        return false;
    }

    for (auto context : contexts)
    {
        const ContextVk *contextVk = vk::GetImpl(context.second);
        if (contextVk->getPriority() != sharedPriority)
        {
            return false;
        }
    }

    return true;
}
}  // namespace

// When set to true, buffer pool stats are logged to the INFO stream.
#define ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING false

ShareGroupVk::ShareGroupVk(const egl::ShareGroupState &state, vk::Renderer *renderer)
    : ShareGroupImpl(state),
      mRenderer(renderer),
      mCurrentFrameCount(0),
      mContextsPriority(egl::ContextPriority::InvalidEnum),
      mIsContextsPriorityLocked(false),
      mLastMonolithicPipelineJobTime(0)
{
    mLastPruneTime = angle::GetCurrentSystemTime();
}

void ShareGroupVk::onContextAdd()
{
    ASSERT(ValidateIdenticalPriority(getContexts(), mContextsPriority));
}

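// Ensures a newly added context uses the same queue priority as the rest of the share group.
// If the share group priority is locked or already at least as high as the new context's, the
// new context simply adopts the group priority; otherwise the entire group is switched to the
// new context's (higher) priority.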
angle::Result ShareGroupVk::unifyContextsPriority(ContextVk *newContextVk)
{
    const egl::ContextPriority newContextPriority = newContextVk->getPriority();
    ASSERT(newContextPriority != egl::ContextPriority::InvalidEnum);

    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(!mIsContextsPriorityLocked);
        ASSERT(getContexts().empty());
        mContextsPriority = newContextPriority;
        return angle::Result::Continue;
    }

    static_assert(egl::ContextPriority::Low < egl::ContextPriority::Medium);
    static_assert(egl::ContextPriority::Medium < egl::ContextPriority::High);
    if (mContextsPriority >= newContextPriority || mIsContextsPriorityLocked)
    {
        newContextVk->setPriority(mContextsPriority);
        return angle::Result::Continue;
    }

    ANGLE_TRY(updateContextsPriority(newContextVk, newContextPriority));

    return angle::Result::Continue;
}

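// Forces the share group to the default (Medium) priority and locks it there, so that contexts
// added later cannot change it.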
angle::Result ShareGroupVk::lockDefaultContextsPriority(ContextVk *contextVk)
{
    constexpr egl::ContextPriority kDefaultPriority = egl::ContextPriority::Medium;
    if (!mIsContextsPriorityLocked)
    {
        if (mContextsPriority != kDefaultPriority)
        {
            ANGLE_TRY(updateContextsPriority(contextVk, kDefaultPriority));
        }
        mIsContextsPriorityLocked = true;
    }
    ASSERT(mContextsPriority == kDefaultPriority);
    return angle::Result::Continue;
}

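// Switches every context in the share group to |newPriority|. Before the contexts are updated,
// a priority dependency between the old and new priorities is submitted to the renderer for
// every protection type used by the contexts.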
angle::Result ShareGroupVk::updateContextsPriority(ContextVk *contextVk,
                                                   egl::ContextPriority newPriority)
{
    ASSERT(!mIsContextsPriorityLocked);
    ASSERT(newPriority != egl::ContextPriority::InvalidEnum);
    ASSERT(newPriority != mContextsPriority);
    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(getContexts().empty());
        mContextsPriority = newPriority;
        return angle::Result::Continue;
    }

    vk::ProtectionTypes protectionTypes;
    protectionTypes.set(contextVk->getProtectionType());
    for (auto context : getContexts())
    {
        protectionTypes.set(vk::GetImpl(context.second)->getProtectionType());
    }

    {
        vk::ScopedQueueSerialIndex index;
        ANGLE_TRY(mRenderer->allocateScopedQueueSerialIndex(&index));
        ANGLE_TRY(mRenderer->submitPriorityDependency(contextVk, protectionTypes, mContextsPriority,
                                                      newPriority, index.get()));
    }

    for (auto context : getContexts())
    {
        ContextVk *sharedContextVk = vk::GetImpl(context.second);

        ASSERT(sharedContextVk->getPriority() == mContextsPriority);
        sharedContextVk->setPriority(newPriority);
    }
    mContextsPriority = newPriority;

    return angle::Result::Continue;
}

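// Releases the Vulkan resources owned by the share group: event garbage, the default buffer
// pools, the pipeline layout and descriptor set layout caches, the meta descriptor pools, the
// framebuffer cache and the pipeline library caches.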
void ShareGroupVk::onDestroy(const egl::Display *display)
{
    DisplayVk *displayVk = vk::GetImpl(display);

    mRefCountedEventsGarbageRecycler.destroy(mRenderer);

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            // If any context uses the display texture share group, a BufferBlock may still be in
            // use by textures that outlive the ShareGroup. Such non-empty BufferBlocks are put
            // into the Renderer's orphan list instead.
            pool->destroy(mRenderer, mState.hasAnyContextWithDisplayTextureShareGroup());
        }
    }

    mPipelineLayoutCache.destroy(mRenderer);
    mDescriptorSetLayoutCache.destroy(mRenderer);

    mMetaDescriptorPools[DescriptorSetIndex::UniformsAndXfb].destroy(mRenderer);
    mMetaDescriptorPools[DescriptorSetIndex::Texture].destroy(mRenderer);
    mMetaDescriptorPools[DescriptorSetIndex::ShaderResource].destroy(mRenderer);

    mFramebufferCache.destroy(mRenderer);
    resetPrevTexture();

    mVertexInputGraphicsPipelineCache.destroy(displayVk);
    mFragmentOutputGraphicsPipelineCache.destroy(displayVk);
}

angle::Result ShareGroupVk::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    return mTextureUpload.onMutableTextureUpload(contextVk, newTexture);
}

void ShareGroupVk::onTextureRelease(TextureVk *textureVk)
{
    mTextureUpload.onTextureRelease(textureVk);
}

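// Posts a worker-thread task that creates a monolithic pipeline; used when the
// preferMonolithicPipelinesOverLibraries feature is enabled. At most one such task is kept in
// flight, and postings are additionally rate-limited by kMonolithicPipelineJobPeriod.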
angle::Result ShareGroupVk::scheduleMonolithicPipelineCreationTask(
    ContextVk *contextVk,
    vk::WaitableMonolithicPipelineCreationTask *taskOut)
{
    ASSERT(contextVk->getFeatures().preferMonolithicPipelinesOverLibraries.enabled);

    // Limit to a single task to avoid hogging all the cores.
    if (mMonolithicPipelineCreationEvent && !mMonolithicPipelineCreationEvent->isReady())
    {
        return angle::Result::Continue;
    }

    // Additionally, rate limit the job postings.
    double currentTime = angle::GetCurrentSystemTime();
    if (currentTime - mLastMonolithicPipelineJobTime < kMonolithicPipelineJobPeriod)
    {
        return angle::Result::Continue;
    }

    mLastMonolithicPipelineJobTime = currentTime;

    const vk::RenderPass *compatibleRenderPass = nullptr;
    // Pull in a compatible RenderPass to be used by the task. This is done at the last minute,
    // just before the task is scheduled, to minimize the time this reference to the render pass
    // cache is held. If the render pass cache needs to be cleared, the main thread will wait
    // for the job to complete.
    ANGLE_TRY(contextVk->getCompatibleRenderPass(taskOut->getTask()->getRenderPassDesc(),
                                                 &compatibleRenderPass));
    taskOut->setRenderPass(compatibleRenderPass);

    mMonolithicPipelineCreationEvent =
        mRenderer->getGlobalOps()->postMultiThreadWorkerTask(taskOut->getTask());

    taskOut->onSchedule(mMonolithicPipelineCreationEvent);

    return angle::Result::Continue;
}

void ShareGroupVk::waitForCurrentMonolithicPipelineCreationTask()
{
    if (mMonolithicPipelineCreationEvent)
    {
        mMonolithicPipelineCreationEvent->wait();
    }
}

angle::Result TextureUpload::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    // This feature is currently disabled in the case of display-level texture sharing.
    ASSERT(!contextVk->hasDisplayTextureShareGroup());
    ASSERT(!newTexture->isImmutable());
    ASSERT(mPrevUploadedMutableTexture == nullptr || !mPrevUploadedMutableTexture->isImmutable());

    // If the previous texture is null, it should be set to the current texture. We also have to
    // make sure that the previous texture pointer is still a mutable texture. Otherwise, we skip
    // the optimization.
    if (mPrevUploadedMutableTexture == nullptr)
    {
        mPrevUploadedMutableTexture = newTexture;
        return angle::Result::Continue;
    }

    // Skip the optimization if we have not switched to a new texture yet.
    if (mPrevUploadedMutableTexture == newTexture)
    {
        return angle::Result::Continue;
    }

    // If the mutable texture is consistently specified, we initialize a full mip chain for it.
    if (mPrevUploadedMutableTexture->isMutableTextureConsistentlySpecifiedForFlush())
    {
        ANGLE_TRY(mPrevUploadedMutableTexture->ensureImageInitialized(
            contextVk, ImageMipLevels::EnabledLevels));
        contextVk->getPerfCounters().mutableTexturesUploaded++;
    }

    // Update the mutable texture pointer with the new pointer for the next potential flush.
    mPrevUploadedMutableTexture = newTexture;

    return angle::Result::Continue;
}

void TextureUpload::onTextureRelease(TextureVk *textureVk)
{
    if (mPrevUploadedMutableTexture == textureVk)
    {
        resetPrevTexture();
    }
}

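// Performs periodic cleanup at frame boundaries: prunes the default buffer pools when due and
// collects ref-counted event garbage.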
void ShareGroupVk::onFramebufferBoundary()
{
    if (isDueForBufferPoolPrune())
    {
        pruneDefaultBufferPools();
    }

    // Always clean up event garbage and destroy the excessive free list at frame boundary.
    cleanupRefCountedEventGarbage();

    mCurrentFrameCount++;
}

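// Returns the default BufferPool for the given memory type index, creating it on first use.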
vk::BufferPool *ShareGroupVk::getDefaultBufferPool(VkDeviceSize size,
                                                   uint32_t memoryTypeIndex,
                                                   BufferUsageType usageType)
{
    if (!mDefaultBufferPools[memoryTypeIndex])
    {
        const vk::Allocator &allocator = mRenderer->getAllocator();
        VkBufferUsageFlags usageFlags  = GetDefaultBufferUsageFlags(mRenderer);

        VkMemoryPropertyFlags memoryPropertyFlags;
        allocator.getMemoryTypeProperties(memoryTypeIndex, &memoryPropertyFlags);

        std::unique_ptr<vk::BufferPool> pool  = std::make_unique<vk::BufferPool>();
        vma::VirtualBlockCreateFlags vmaFlags = vma::VirtualBlockCreateFlagBits::GENERAL;
        pool->initWithFlags(mRenderer, vmaFlags, usageFlags, 0, memoryTypeIndex,
                            memoryPropertyFlags);
        mDefaultBufferPools[memoryTypeIndex] = std::move(pool);
    }

    return mDefaultBufferPools[memoryTypeIndex].get();
}

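// Frees empty buffer blocks from the default buffer pools. Skipped entirely when no
// suballocations have been destroyed since the last prune.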
void ShareGroupVk::pruneDefaultBufferPools()
{
    mLastPruneTime = angle::GetCurrentSystemTime();

    // Bail out if no suballocations have been destroyed since the last prune.
    if (mRenderer->getSuballocationDestroyedSize() == 0)
    {
        return;
    }

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            pool->pruneEmptyBuffers(mRenderer);
        }
    }

    mRenderer->onBufferPoolPrune();

#if ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING
    logBufferPools();
#endif
}

bool ShareGroupVk::isDueForBufferPoolPrune()
{
    // Ensure we periodically prune to maintain the heuristic information.
    double timeElapsed = angle::GetCurrentSystemTime() - mLastPruneTime;
    if (timeElapsed > kTimeElapsedForPruneDefaultBufferPool)
    {
        return true;
    }

    // If we have destroyed a lot of memory, also prune to ensure memory gets freed as soon as
    // possible.
    if (mRenderer->getSuballocationDestroyedSize() >= kMaxTotalEmptyBufferBytes)
    {
        return true;
    }

    return false;
}

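// Sums the buffer count and total memory size across all default buffer pools.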
void ShareGroupVk::calculateTotalBufferCount(size_t *bufferCount, VkDeviceSize *totalSize) const
{
    *bufferCount = 0;
    *totalSize   = 0;
    for (const std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            *bufferCount += pool->getBufferCount();
            *totalSize += pool->getMemorySize();
        }
    }
}

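// Logs per-pool statistics for every non-empty default buffer pool.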
void ShareGroupVk::logBufferPools() const
{
    for (size_t i = 0; i < mDefaultBufferPools.size(); i++)
    {
        const std::unique_ptr<vk::BufferPool> &pool = mDefaultBufferPools[i];
        if (pool && pool->getBufferCount() > 0)
        {
            std::ostringstream log;
            pool->addStats(&log);
            INFO() << "Pool[" << i << "]:" << log.str();
        }
    }
}
}  // namespace rx