/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/core/SkDrawable.h"
#include "include/core/SkRefCnt.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "include/private/gpu/vk/SkiaVulkan.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrXferProcessor.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkMSAALoadManager.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>

class GrAttachment;
class GrBackendSemaphore;
class GrDirectContext;
class GrGpuBuffer;
class GrManagedResource;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTarget;
class GrSemaphore;
class GrSurface;
class GrSurfaceProxy;
class GrTexture;
class GrThreadSafePipelineBuilder;
class GrVkBuffer;
class GrVkCommandBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkImage;
class GrVkOpsRenderPass;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
enum class SkTextureCompressionType;
struct GrContextOptions;
struct GrVkDrawableInfo;
struct GrVkImageInfo;
struct SkIPoint;
struct SkIRect;
struct SkISize;
struct SkImageInfo;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}

namespace skgpu {
class MutableTextureState;
class VulkanMemoryAllocator;
struct VulkanBackendContext;
struct VulkanInterface;
}  // namespace skgpu

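// A GrVkGpu is not created directly by clients; it is built by the public Ganesh factory
// when a Vulkan-backed GrDirectContext is made. The sketch below is illustrative only: the
// field names come from skgpu::VulkanBackendContext, and GrDirectContexts::MakeVulkan (see
// GrVkDirectContext.h) is assumed to be the public entry point; treat the exact signatures
// as assumptions rather than guarantees of this header.
//
//     skgpu::VulkanBackendContext backendContext;
//     backendContext.fInstance           = instance;         // client-created VkInstance
//     backendContext.fPhysicalDevice     = physicalDevice;
//     backendContext.fDevice             = device;
//     backendContext.fQueue              = graphicsQueue;    // must be a graphics queue
//     backendContext.fGraphicsQueueIndex = queueFamilyIndex;
//     backendContext.fGetProc            = getProc;          // a skgpu::VulkanGetProc
//     sk_sp<GrDirectContext> ctx = GrDirectContexts::MakeVulkan(backendContext);
//
// That factory ultimately calls GrVkGpu::Make below.
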
class GrVkGpu : public GrGpu {
public:
    static std::unique_ptr<GrGpu> Make(const skgpu::VulkanBackendContext&,
                                       const GrContextOptions&,
                                       GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const skgpu::VulkanInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    skgpu::VulkanMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == skgpu::Protected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const skgpu::MutableTextureState&,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const skgpu::MutableTextureState&,
                                     skgpu::MutableTextureState* previousState,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback) override;

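    // Illustrative sketch of how the two state setters above are typically reached: a client
    // that wants to consume a Skia-rendered texture from raw Vulkan code transitions it via
    // the public GrDirectContext wrapper, which forwards here. The helper names below
    // (skgpu::MutableTextureStates::MakeVulkan, GrDirectContext::setBackendTextureState) are
    // assumptions about the public API, not declarations of this header.
    //
    //     skgpu::MutableTextureState newState = skgpu::MutableTextureStates::MakeVulkan(
    //             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_QUEUE_FAMILY_IGNORED);
    //     directContext->setBackendTextureState(backendTexture, newState);
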
    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if defined(GPU_TEST_UTILS)
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions,
                                              int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    [[nodiscard]] std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;

    // These match the definitions in SkDrawable, whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishedCallbacks() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    bool zeroBuffer(sk_sp<GrGpuBuffer>);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if the VkResult indicates success, and also checks for device loss and OOM.
    // Every Vulkan call (and every skgpu::VulkanMemoryAllocator call that returns a VkResult)
    // made on behalf of this GrVkGpu should be routed through this function so that we respond
    // to OOMs and lost devices.
    bool checkVkResult(VkResult);

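    // Illustrative call-site sketch (the createInfo and sampler locals are hypothetical; the
    // fFunctions table is how Skia dispatches through skgpu::VulkanInterface):
    //
    //     VkResult result = gpu->vkInterface()->fFunctions.fCreateSampler(
    //             gpu->device(), &createInfo, /*pAllocator=*/nullptr, &sampler);
    //     if (!gpu->checkVkResult(result)) {
    //         return false;  // a device-lost or OOM condition has now been recorded
    //     }
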
private:
    GrVkGpu(GrDirectContext*,
            const skgpu::VulkanBackendContext&,
            const sk_sp<GrVkCaps> caps,
            sk_sp<const skgpu::VulkanInterface>,
            uint32_t instanceVersion,
            uint32_t physicalDeviceVersion,
            sk_sp<skgpu::VulkanMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            skgpu::Mipmapped,
                                            GrProtected,
                                            std::string_view label) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      skgpu::Mipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<skgpu::MutableTextureState> currentState,
                                SkISize dimensions,
                                VkImageLayout newLayout,
                                uint32_t newQueueFamilyIndex,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     skgpu::Budgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask,
                                     std::string_view label) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               skgpu::Budgeted,
                                               skgpu::Mipmapped,
                                               GrProtected,
                                               const void* data,
                                               size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                      size_t srcOffset,
                                      sk_sp<GrGpuBuffer> dst,
                                      size_t dstOffset,
                                      size_t size) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                       GrSurface* src, const SkIRect& srcRect,
                       GrSamplerState::Filter) override;

    void addFinishedCallback(skgpu::AutoCallback callback,
                             std::optional<GrTimerQuery> timerQuery) override {
        SkASSERT(!timerQuery);
        this->addFinishedCallback(skgpu::RefCntedCallback::Make(std::move(callback)));
    }

    void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) override;

    bool onSubmitToGpu(const GrSubmitInfo& info) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue, then creates a new command
    // buffer and begins it. If fSync in the submit info is GrSyncCpu::kYes, this function
    // waits for all work in the queue to finish before returning. Any semaphores in
    // fSemaphoresToSignal are added as signal semaphores, and any semaphores in
    // fSemaphoresToWaitOn as wait semaphores, to this command buffer's submission.
    //
    // If fMarkBoundary in the submit info is GrMarkFrameBoundary::kYes, we mark the end of a
    // frame, provided the VK_EXT_frame_boundary extension is available.
    bool submitCommandBuffer(const GrSubmitInfo& submitInfo);

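    // Conceptually, the queue submission built by submitCommandBuffer maps onto the Vulkan
    // API roughly as follows (a sketch of the mapping, not the actual implementation):
    //
    //     VkSubmitInfo submitInfo = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
    //     submitInfo.waitSemaphoreCount   = fSemaphoresToWaitOn.size();
    //     submitInfo.pWaitSemaphores      = /* VkSemaphore handles from fSemaphoresToWaitOn */;
    //     submitInfo.commandBufferCount   = 1;
    //     submitInfo.pCommandBuffers      = /* fMainCmdBuffer's VkCommandBuffer */;
    //     submitInfo.signalSemaphoreCount = fSemaphoresToSignal.size();
    //     submitInfo.pSignalSemaphores    = /* VkSemaphore handles from fSemaphoresToSignal */;
    //     vkQueueSubmit(fQueue, 1, &submitInfo, fence);
    //     // GrSyncCpu::kYes then additionally waits for the queued work to complete.
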
    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIRect& dstRect,
                           GrSamplerState::Filter filter);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType colorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType colorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex,
                                 SkTextureCompressionType compression,
                                 VkFormat vkFormat,
                                 SkISize dimensions,
                                 skgpu::Mipmapped mipmapped,
                                 const void* data,
                                 size_t dataSize);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        skgpu::Mipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const skgpu::VulkanInterface> fInterface;
    sk_sp<skgpu::VulkanMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue;  // Must be a graphics queue.
    uint32_t fQueueIndex;

    // Created by GrVkGpu.
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    skia_private::STArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
    skia_private::STArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;

    skia_private::TArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // Tracks whether we have already disconnected all the gpu resources from the Vulkan
    // context.
    bool fDisconnected;

    skgpu::Protected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    skgpu::VulkanDeviceLostContext fDeviceLostContext;
    skgpu::VulkanDeviceLostProc fDeviceLostProc;

    using INHERITED = GrGpu;
};

#endif