/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkGpu.h"

#include "include/core/SkImageInfo.h"
#include "include/core/SkPoint.h"
#include "include/core/SkRect.h"
#include "include/core/SkSamplingOptions.h"
#include "include/core/SkSize.h"
#include "include/core/SkSurface.h"
#include "include/core/SkTextureCompressionType.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/MutableTextureState.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/vk/GrVkBackendSemaphore.h"
#include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
#include "include/gpu/ganesh/vk/GrVkTypes.h"
#include "include/gpu/vk/VulkanBackendContext.h"
#include "include/gpu/vk/VulkanExtensions.h"
#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTemplates.h"
#include "include/private/base/SkTo.h"
#include "include/private/gpu/vk/SkiaVulkan.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrBuffer.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrImageInfo.h"
#include "src/gpu/ganesh/GrPixmap.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrSurface.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"
#include "src/gpu/ganesh/vk/GrVkTextureRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#include "src/gpu/vk/VulkanInterface.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#include <algorithm>
#include <cstring>
#include <functional>
#include <utility>

class GrAttachment;
class GrBackendSemaphore;
class GrManagedResource;
class GrProgramDesc;
class GrSemaphore;
struct GrContextOptions;

#if defined(SK_USE_VMA)
#include "src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorPriv.h"
#endif

using namespace skia_private;

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
std::unique_ptr<GrGpu> GrVkGpu::Make(const skgpu::VulkanBackendContext& backendContext,
                                     const GrContextOptions& options,
                                     GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    skgpu::VulkanExtensions ext;
    const skgpu::VulkanExtensions* extensions = &ext;
    if (backendContext.fVkExtensions) {
        extensions = backendContext.fVkExtensions;
    }

    uint32_t instanceVersion = 0;
    uint32_t physDevVersion = 0;
    sk_sp<const skgpu::VulkanInterface> interface =
            skgpu::MakeInterface(backendContext, extensions, &instanceVersion, &physDevVersion);
    if (!interface) {
        return nullptr;
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                features2,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                features,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
#if defined(SK_USE_VMA)
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator =
                skgpu::VulkanMemoryAllocators::Make(backendContext,
                                                    skgpu::ThreadSafe::kNo,
                                                    options.fVulkanVMALargeHeapBlockSize);
    }
#endif
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
                                               backendContext,
                                               std::move(caps),
                                               interface,
                                               instanceVersion,
                                               physDevVersion,
                                               std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedContent()) {
        return nullptr;
    }
    return vkGpu;
}
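
// A minimal sketch of the backend context that Make() validates above. The fields shown are
// the ones checked at the top of Make(); the handle variables (instance, physicalDevice,
// device, queue, getProc) are illustrative placeholders supplied by the client:
//
//     skgpu::VulkanBackendContext backendContext;
//     backendContext.fInstance       = instance;        // VkInstance
//     backendContext.fPhysicalDevice = physicalDevice;  // VkPhysicalDevice
//     backendContext.fDevice         = device;          // VkDevice
//     backendContext.fQueue          = queue;           // VkQueue
//     backendContext.fGetProc        = getProc;         // skgpu::VulkanGetProc
//     std::unique_ptr<GrGpu> gpu = GrVkGpu::Make(backendContext, options, direct);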

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct,
                 const skgpu::VulkanBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps,
                 sk_sp<const skgpu::VulkanInterface> interface,
                 uint32_t instanceVersion,
                 uint32_t physicalDeviceVersion,
                 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext)
        , fDeviceLostContext(backendContext.fDeviceLostContext)
        , fDeviceLostProc(backendContext.fDeviceLostProc) {
    SkASSERT(fMemoryAllocator);

    this->initCaps(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.clear();

    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}


void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.clear();
        fSemaphoresToSignal.clear();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(const GrSubmitInfo& submitInfo) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && submitInfo.fSync == GrSyncCpu::kNo &&
        fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn, submitInfo);

    if (didSubmit && submitInfo.fSync == GrSyncCpu::kYes) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to be destroyed.
    fDrawables.clear();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.clear();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    // Release the old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers call may trigger a releaseProc which may cause us to insert a barrier
    // for a released GrVkImage. That barrier needs to be put into a new command buffer and not
    // the old one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
                                           GrGpuBufferType type,
                                           GrAccessPattern accessPattern) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    return GrVkBuffer::Make(this, size, type, accessPattern);
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!skgpu::VkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            GrSubmitInfo submitInfo;
            submitInfo.fSync = GrSyncCpu::kYes;
            if (!this->submitCommandBuffer(submitInfo)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

// When we update vertex/index buffers via transfers we assume that they may have been used
// previously in draws and will be used again in draws afterwards. So we put a barrier before and
// after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
// *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
// usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
// barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
// Pass false as "after" before the transfer and true after the transfer.
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
                                                GrVkBuffer* dst,
                                                size_t offset,
                                                size_t size,
                                                bool after) {
    if (dst->intendedType() != GrGpuBufferType::kIndex &&
        dst->intendedType() != GrGpuBufferType::kVertex) {
        return;
    }

    VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
                                          ? VK_ACCESS_INDEX_READ_BIT
                                          : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    VkPipelineStageFlagBits srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    VkPipelineStageFlagBits dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;

    if (after) {
        using std::swap;
        swap(srcAccessMask,         dstAccessMask        );
        swap(srcPipelineStageFlags, dstPipelineStageFlags);
    }

    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            dst->vkBuffer(),                          // buffer
            offset,                                   // offset
            size,                                     // size
    };

    gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
                                dstPipelineStageFlags,
                                /*byRegion=*/false,
                                &bufferMemoryBarrier);
}
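
// A sketch of the barrier directions the helper above emits for a vertex/index destination
// (following the swap logic on 'after'):
//   before the copy (after=false): VERTEX_INPUT / *_READ     -> TRANSFER / TRANSFER_WRITE
//   after the copy  (after=true):  TRANSFER / TRANSFER_WRITE -> VERTEX_INPUT / *_READ
// The two calls in onTransferFromBufferToBuffer below bracket the copy this way.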

bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                           size_t srcOffset,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset,
                                           size_t size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/true);

    return true;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
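    // (e.g., with bpp == 8 the offset must be a multiple of 8, while with bpp == 2 any
    // 4-byte-aligned offset already satisfies both conditions; values illustrative.)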
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!skgpu::VkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, and to stop and restart
        // the VkRenderPass, which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to the buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const skgpu::VulkanInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const skgpu::VulkanAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
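    // e.g. (illustrative values) rect.top() == 4, rect.left() == 8, rowPitch == 1024, bpp == 4:
    // offset = 4*1024 + 8*4 = 4128 bytes into the linearly tiled image's allocation.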
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    auto checkResult = [this](VkResult result) {
        return this->checkVkResult(result);
    };
    auto allocator = this->memoryAllocator();
    void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         TArray<VkBufferImageCopy>* regions,
                                         TArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkTextureCompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         skgpu::Mipmapped mipmapped) {
    SkASSERT(compression != SkTextureCompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_exact(regions->size() + numMipLevels);
    individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);

    size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(
            compression, dimensions, individualMipOffsets, mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets->size() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size
    // and 4.
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
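    // Rounding examples (illustrative): bytesPerBlock 8 -> alignment 8; 2 -> 4; 3 -> 12.
    // In each case the result is the smallest multiple of bytesPerBlock divisible by 4.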
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // 'texels' is const, but we may need to adjust the fPixels pointer based on the upload rect
    // or fRowBytes. Because of this we make a non-const shallow copy of texels.
    AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size
    // and 4.
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
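    // Same rounding as in fill_in_compressed_regions above: the result is the smallest
    // multiple of bpp that is also a multiple of 4 (e.g. bpp 2 -> 4, bpp 8 -> 8; illustrative).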
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    TArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth  = std::max(1,  currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer comes from the staging manager, which will make sure the command buffer has a ref
    // on the buffer. This avoids having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkTextureCompressionType compression,
                                      VkFormat vkFormat,
                                      SkISize dimensions,
                                      skgpu::Mipmapped mipmapped,
                                      const void* data,
                                      size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipmapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer comes from the staging manager, which will make sure the command buffer has a ref
    // on the buffer. This avoids having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a skgpu::Mipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          skgpu::Budgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask,
                                          std::string_view label) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected, label);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        STArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
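        // e.g. (illustrative) levelClearMask == 0b1011 produces two ranges:
        // {baseMipLevel 0, levelCount 2} and {baseMipLevel 3, levelCount 1}.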
1162         SkASSERT(!ranges.empty());
1163         static constexpr VkClearColorValue kZeroClearColor = {};
1164         texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1165                             VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1166         this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1167                                                       ranges.size(), ranges.begin());
1168     }
1169     return tex;
1170 }
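// For illustration: the loop above coalesces runs of set bits in
// levelClearMask into single VkImageSubresourceRange entries. For example,
// with 8 mip levels a mask of 0b01100111 (levels 0, 1, 2, 5, 6 set) produces
// two ranges, {baseMipLevel=0, levelCount=3} and {baseMipLevel=5,
// levelCount=2}, so clearColorImage issues one clear per contiguous run
// rather than one per level.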
1171 
1172 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1173                                                     const GrBackendFormat& format,
1174                                                     skgpu::Budgeted budgeted,
1175                                                     skgpu::Mipmapped mipmapped,
1176                                                     GrProtected isProtected,
1177                                                     const void* data,
1178                                                     size_t dataSize) {
1179     VkFormat pixelFormat;
1180     SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1181     SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));
1182 
1183     int numMipLevels = 1;
1184     if (mipmapped == skgpu::Mipmapped::kYes) {
1185         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1186     }
1187 
1188     GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
1189                                           ? GrMipmapStatus::kValid
1190                                           : GrMipmapStatus::kNotAllocated;
1191 
1192     auto tex = GrVkTexture::MakeNewTexture(this,
1193                                            budgeted,
1194                                            dimensions,
1195                                            pixelFormat,
1196                                            numMipLevels,
1197                                            isProtected,
1198                                            mipmapStatus,
1199                                            /*label=*/"VkGpu_CreateCompressedTexture");
1200     if (!tex) {
1201         return nullptr;
1202     }
1203 
1204     SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1205     if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1206                                        dimensions, mipmapped, data, dataSize)) {
1207         return nullptr;
1208     }
1209 
1210     return tex;
1211 }
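// SkMipmap::ComputeLevelCount returns the number of levels below the base, so
// the "+ 1" above yields the full chain length, floor(log2(max(w, h))) + 1. A
// minimal equivalent sketch without the SkMipmap helper (hypothetical
// function, not Skia API):
//
//     int fullMipChainLength(int w, int h) {
//         int levels = 1;
//         while (w > 1 || h > 1) {        // halve until both dimensions reach 1
//             w = std::max(1, w / 2);
//             h = std::max(1, h / 2);
//             ++levels;
//         }
//         return levels;                  // e.g. 256x64 -> 9 levels
//     }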
1212 
1213 ////////////////////////////////////////////////////////////////////////////////
1214 
1215 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1216                            VkDeviceSize offset, VkDeviceSize size) {
1217     if (!this->currentCommandBuffer()) {
1218         return false;
1219     }
1220     add_transfer_dst_buffer_mem_barrier(this,
1221                                         static_cast<GrVkBuffer*>(buffer.get()),
1222                                         offset,
1223                                         size,
1224                                         /*after=*/false);
1225     this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1226     add_transfer_dst_buffer_mem_barrier(this,
1227                                         static_cast<GrVkBuffer*>(buffer.get()),
1228                                         offset,
1229                                         size,
1230                                         /*after=*/true);
1231 
1232     return true;
1233 }
1234 
1235 bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
1236     if (!this->currentCommandBuffer()) {
1237         return false;
1238     }
1239 
1240     add_transfer_dst_buffer_mem_barrier(this,
1241                                         static_cast<GrVkBuffer*>(buffer.get()),
1242                                         /*offset=*/0,
1243                                         buffer->size(),
1244                                         /*after=*/false);
1245     this->currentCommandBuffer()->fillBuffer(this,
1246                                              buffer,
1247                                              /*offset=*/0,
1248                                              buffer->size(),
1249                                              /*data=*/0);
1250     add_transfer_dst_buffer_mem_barrier(this,
1251                                         static_cast<GrVkBuffer*>(buffer.get()),
1252                                         /*offset=*/0,
1253                                         buffer->size(),
1254                                         /*after=*/true);
1255 
1256     return true;
1257 }
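// Both updateBuffer and zeroBuffer above bracket the transfer write with
// buffer memory barriers: the "before" barrier orders the write against prior
// uses of the range, and the "after" barrier makes it visible to later
// consumers. A minimal sketch of that pattern in raw Vulkan (assuming a
// transfer-dst buffer and generic access masks; this is not the Skia helper
// itself):
//
//     VkBufferMemoryBarrier b = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER};
//     b.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     b.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     b.srcQueueFamilyIndex = b.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     b.buffer = buf; b.offset = offset; b.size = size;
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
//                          0, nullptr, 1, &b, 0, nullptr);
//     vkCmdFillBuffer(cmd, buf, offset, size, /*data=*/0);
//     b.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // publish the write
//     b.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0,
//                          0, nullptr, 1, &b, 0, nullptr);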
1258 
1259 ////////////////////////////////////////////////////////////////////////////////
1260 
1261 static bool check_image_info(const GrVkCaps& caps,
1262                              const GrVkImageInfo& info,
1263                              bool needsAllocation,
1264                              uint32_t graphicsQueueIndex) {
1265     if (VK_NULL_HANDLE == info.fImage) {
1266         return false;
1267     }
1268 
1269     if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1270         return false;
1271     }
1272 
1273     if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1274         return false;
1275     }
1276 
1277     if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1278         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1279         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1280         if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1281             if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1282                 return false;
1283             }
1284         } else {
1285             return false;
1286         }
1287     }
1288 
1289     if (info.fYcbcrConversionInfo.isValid()) {
1290         if (!caps.supportsYcbcrConversion()) {
1291             return false;
1292         }
1293         if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1294             return true;
1295         }
1296     }
1297 
1298     // We currently require everything to be made with transfer bits set
1299     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1300         !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1301         return false;
1302     }
1303 
1304     return true;
1305 }
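// Summarizing the queue-family rule above: an image whose fCurrentQueueFamily
// is VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL, or
// VK_QUEUE_FAMILY_FOREIGN_EXT passes this check; otherwise the image must use
// VK_SHARING_MODE_EXCLUSIVE and already belong to our graphics queue family,
// since a non-special family other than ours would require a queue-family
// transfer we cannot perform here.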
1306 
1307 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1308     // We don't support directly importing multisampled textures for sampling from shaders.
1309     if (info.fSampleCount != 1) {
1310         return false;
1311     }
1312 
1313     if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1314         return true;
1315     }
1316     if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1317         if (!caps.isVkFormatTexturable(info.fFormat)) {
1318             return false;
1319         }
1320     } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1321         if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1322             return false;
1323         }
1324     } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1325         if (!caps.supportsDRMFormatModifiers()) {
1326             return false;
1327         }
1328         // To be technically correct we should query the vulkan support for VkFormat and
1329         // drmFormatModifier pairs to confirm the required feature support is there. However, we
1330         // currently don't have our caps and format tables set up to do this efficiently. So
1331         // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were
1332         // set up using valid features (checked below). In practice this should all be safe
1333         // because currently we are setting all drm format modifier textures to have a
1334         // GrTextureType::kExternal, so we just really need to be able to read these video
1335         // VkImages in a shader. The video decoder isn't going to give us VkImages that don't
1336         // support being sampled.
1337     } else {
1338         SkUNREACHABLE;
1339     }
1340 
1341     // We currently require all textures to be made with sample support
1342     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1343         return false;
1344     }
1345 
1346     return true;
1347 }
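// The tiling checks above map each VkImageTiling to the capability it
// requires:
//
//     VK_IMAGE_TILING_OPTIMAL                  -> isVkFormatTexturable
//     VK_IMAGE_TILING_LINEAR                   -> isVkFormatTexturableLinearly
//     VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT  -> supportsDRMFormatModifiers
//                                                 (usage flags trusted, see above)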
1348 
1349 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1350     if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1351         return false;
1352     }
1353     if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1354         return false;
1355     }
1356     return true;
1357 }
1358 
1359 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1360                                                GrWrapOwnership ownership,
1361                                                GrWrapCacheable cacheable,
1362                                                GrIOType ioType) {
1363     GrVkImageInfo imageInfo;
1364     if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1365         return nullptr;
1366     }
1367 
1368     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1369                           this->queueIndex())) {
1370         return nullptr;
1371     }
1372 
1373     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1374         return nullptr;
1375     }
1376 
1377     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1378         return nullptr;
1379     }
1380 
1381     sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1382     SkASSERT(mutableState);
1383     return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1384                                            ioType, imageInfo, std::move(mutableState));
1385 }
1386 
1387 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1388                                                          GrWrapOwnership ownership,
1389                                                          GrWrapCacheable cacheable) {
1390     return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1391 }
1392 
1393 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1394                                                          int sampleCnt,
1395                                                          GrWrapOwnership ownership,
1396                                                          GrWrapCacheable cacheable) {
1397     GrVkImageInfo imageInfo;
1398     if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1399         return nullptr;
1400     }
1401 
1402     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1403                           this->queueIndex())) {
1404         return nullptr;
1405     }
1406 
1407     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1408         return nullptr;
1409     }
1410     // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1411     // the wrapped VkImage.
1412     bool resolveOnly = sampleCnt > 1;
1413     if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1414         return nullptr;
1415     }
1416 
1417     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1418         return nullptr;
1419     }
1420 
1421     sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1422 
1423     sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1424     SkASSERT(mutableState);
1425 
1426     return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1427                                                                    sampleCnt, ownership, cacheable,
1428                                                                    imageInfo,
1429                                                                    std::move(mutableState));
1430 }
1431 
1432 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1433     GrVkImageInfo info;
1434     if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
1435         return nullptr;
1436     }
1437 
1438     if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1439         return nullptr;
1440     }
1441 
1442     // We will always render directly to this VkImage.
1443     static constexpr bool kResolveOnly = false;
1444     if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1445         return nullptr;
1446     }
1447 
1448     if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1449         return nullptr;
1450     }
1451 
1452     sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
1453     SkASSERT(mutableState);
1454 
1455     sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1456             this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1457 
1458     // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1459     SkASSERT(!backendRT.stencilBits());
1460     if (tgt) {
1461         SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1462     }
1463 
1464     return tgt;
1465 }
1466 
1467 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1468         const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1469     int maxSize = this->caps()->maxTextureSize();
1470     if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1471         return nullptr;
1472     }
1473 
1474     GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
1475     if (!backendFormat.isValid()) {
1476         return nullptr;
1477     }
1478     int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1479     if (!sampleCnt) {
1480         return nullptr;
1481     }
1482 
1483     return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1484 }
1485 
1486 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1487                                   const GrVkRenderPass& renderPass,
1488                                   GrAttachment* dst,
1489                                   GrVkImage* src,
1490                                   const SkIRect& srcRect) {
1491     return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1492 }
1493 
1494 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1495     if (!this->currentCommandBuffer()) {
1496         return false;
1497     }
1498     auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1499     // don't do anything for linearly tiled textures (can't have mipmaps)
1500     if (vkTex->isLinearTiled()) {
1501         SkDebugf("Trying to create mipmap for linear tiled texture");
1502         SkDebugf("Trying to create mipmaps for a linearly tiled texture");
1503     }
1504     SkASSERT(tex->textureType() == GrTextureType::k2D);
1505 
1506     // determine if we can blit to and from this format
1507     const GrVkCaps& caps = this->vkCaps();
1508     if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1509         !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1510         !caps.mipmapSupport()) {
1511         return false;
1512     }
1513 
1514     int width = tex->width();
1515     int height = tex->height();
1516     VkImageBlit blitRegion;
1517     memset(&blitRegion, 0, sizeof(VkImageBlit));
1518 
1519     // SkMipmap doesn't include the base level in the level count so we have to add 1
1520     uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1521     SkASSERT(levelCount == vkTex->mipLevels());
1522 
1523     // change layout of the layers so we can write to them.
1524     vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1525                           VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1526 
1527     // setup memory barrier
1528     SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1529     VkImageMemoryBarrier imageMemoryBarrier = {
1530             VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
1531             nullptr,                                 // pNext
1532             VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
1533             VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
1534             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
1535             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
1536             VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
1537             VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
1538             vkTex->image(),                          // image
1539             {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
1540     };
1541 
1542     // Blit the miplevels
1543     uint32_t mipLevel = 1;
1544     while (mipLevel < levelCount) {
1545         int prevWidth = width;
1546         int prevHeight = height;
1547         width = std::max(1, width / 2);
1548         height = std::max(1, height / 2);
1549 
1550         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1551         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1552                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1553 
1554         blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1555         blitRegion.srcOffsets[0] = { 0, 0, 0 };
1556         blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1557         blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1558         blitRegion.dstOffsets[0] = { 0, 0, 0 };
1559         blitRegion.dstOffsets[1] = { width, height, 1 };
1560         this->currentCommandBuffer()->blitImage(this,
1561                                                 vkTex->resource(),
1562                                                 vkTex->image(),
1563                                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1564                                                 vkTex->resource(),
1565                                                 vkTex->image(),
1566                                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1567                                                 1,
1568                                                 &blitRegion,
1569                                                 VK_FILTER_LINEAR);
1570         ++mipLevel;
1571     }
1572     if (levelCount > 1) {
1573         // This barrier logically is not needed, but it changes the final level to the same layout
1574         // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1575         // layouts and future layout changes easier. The alternative here would be to track layout
1576         // and memory accesses per layer, which doesn't seem worth it.
1577         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1578         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1579                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1580         vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1581     }
1582     return true;
1583 }
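// As a worked example of the blit loop above: a 100x40 texture has
// ComputeLevelCount(100, 40) + 1 == 7 levels, and each iteration blits the
// previous level into one half the size (each dimension clamped to 1):
//
//     level 0: 100x40    level 4: 6x2
//     level 1:  50x20    level 5: 3x1
//     level 2:  25x10    level 6: 1x1
//     level 3:  12x5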
1584 
1585 ////////////////////////////////////////////////////////////////////////////////
1586 
1587 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1588                                                    SkISize dimensions, int numStencilSamples) {
1589     VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1590 
1591     fStats.incStencilAttachmentCreates();
1592     return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1593 }
1594 
1595 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1596                                                 const GrBackendFormat& format,
1597                                                 int numSamples,
1598                                                 GrProtected isProtected,
1599                                                 GrMemoryless memoryless) {
1600     VkFormat pixelFormat;
1601     SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1602     SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1603     SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1604 
1605     fStats.incMSAAAttachmentCreates();
1606     return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1607 }
1608 
1609 ////////////////////////////////////////////////////////////////////////////////
1610 
1611 bool copy_src_data(char* mapPtr,
1612                    VkFormat vkFormat,
1613                    const TArray<size_t>& individualMipOffsets,
1614                    const GrPixmap srcData[],
1615                    int numMipLevels) {
1616     SkASSERT(srcData && numMipLevels);
1617     SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
1618     SkASSERT(individualMipOffsets.size() == numMipLevels);
1619     SkASSERT(mapPtr);
1620 
1621     size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);
1622 
1623     for (int level = 0; level < numMipLevels; ++level) {
1624         const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1625 
1626         SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1627                      srcData[level].addr(), srcData[level].rowBytes(),
1628                      trimRB, srcData[level].height());
1629     }
1630     return true;
1631 }
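// copy_src_data tightly packs every level: the destination pitch is
// width * bytesPerPixel even when the source pixmap carries padded rowBytes.
// The SkRectMemcpy call above amounts to this row-by-row repack (sketch):
//
//     for (int y = 0; y < srcData[level].height(); ++y) {
//         memcpy(dst + y * trimRB,              // tight destination pitch
//                src + y * srcRowBytes,         // possibly padded source pitch
//                trimRB);                       // only the used bytes per row
//     }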
1632 
1633 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1634                                              SkISize dimensions,
1635                                              int sampleCnt,
1636                                              GrTexturable texturable,
1637                                              GrRenderable renderable,
1638                                              skgpu::Mipmapped mipmapped,
1639                                              GrVkImageInfo* info,
1640                                              GrProtected isProtected) {
1641     SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1642 
1643     if (fProtectedContext != isProtected) {
1644         return false;
1645     }
1646 
1647     if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1648         return false;
1649     }
1650 
1651     // MSAA images are currently only used by createTestingOnlyBackendRenderTarget.
1652     if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1653         return false;
1654     }
1655 
1656     if (renderable == GrRenderable::kYes) {
1657         sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1658         if (!sampleCnt) {
1659             return false;
1660         }
1661     }
1662 
1663 
1664     int numMipLevels = 1;
1665     if (mipmapped == skgpu::Mipmapped::kYes) {
1666         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1667     }
1668 
1669     VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1670                                    VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1671     if (texturable == GrTexturable::kYes) {
1672         usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1673     }
1674     if (renderable == GrRenderable::kYes) {
1675         usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1676         // We always make our render targets support being used as input attachments
1677         usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1678     }
1679 
1680     GrVkImage::ImageDesc imageDesc;
1681     imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1682     imageDesc.fFormat = vkFormat;
1683     imageDesc.fWidth = dimensions.width();
1684     imageDesc.fHeight = dimensions.height();
1685     imageDesc.fLevels = numMipLevels;
1686     imageDesc.fSamples = sampleCnt;
1687     imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1688     imageDesc.fUsageFlags = usageFlags;
1689     imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1690     imageDesc.fIsProtected = fProtectedContext;
1691 
1692     if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1693         SkDebugf("Failed to init image info\n");
1694         return false;
1695     }
1696 
1697     return true;
1698 }
1699 
1700 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1701                                     sk_sp<skgpu::RefCntedCallback> finishedCallback,
1702                                     std::array<float, 4> color) {
1703     GrVkImageInfo info;
1704     SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1705 
1706     sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1707     SkASSERT(mutableState);
1708     sk_sp<GrVkTexture> texture =
1709                 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1710                                                 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1711                                                 kRW_GrIOType, info, std::move(mutableState));
1712     if (!texture) {
1713         return false;
1714     }
1715     GrVkImage* texImage = texture->textureImage();
1716 
1717     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1718     if (!cmdBuffer) {
1719         return false;
1720     }
1721 
1722     texImage->setImageLayout(this,
1723                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1724                              VK_ACCESS_TRANSFER_WRITE_BIT,
1725                              VK_PIPELINE_STAGE_TRANSFER_BIT,
1726                              false);
1727 
1728     // CmdClearColorImage doesn't work for compressed formats
1729     SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat));
1730 
1731     VkClearColorValue vkColor;
1732     // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1733     // uint32 union members in those cases.
1734     vkColor.float32[0] = color[0];
1735     vkColor.float32[1] = color[1];
1736     vkColor.float32[2] = color[2];
1737     vkColor.float32[3] = color[3];
1738     VkImageSubresourceRange range;
1739     range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1740     range.baseArrayLayer = 0;
1741     range.baseMipLevel = 0;
1742     range.layerCount = 1;
1743     range.levelCount = info.fLevelCount;
1744     cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1745 
1746     // Change the image layout to shader read: when this texture is used as a borrowed
1747     // texture within Ganesh we require that its layout be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1748     texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1749                                   VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1750                                   false);
1751 
1752     if (finishedCallback) {
1753         this->addFinishedCallback(std::move(finishedCallback));
1754     }
1755     return true;
1756 }
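// Should SINT/UINT formats ever be supported, the clear color above would
// need to go through the matching VkClearColorValue union member instead of
// float32. A sketch of the dispatch (the format classification predicates are
// hypothetical):
//
//     VkClearColorValue v;
//     if (formatIsUnsignedInt)    for (int i = 0; i < 4; ++i) v.uint32[i]  = (uint32_t)color[i];
//     else if (formatIsSignedInt) for (int i = 0; i < 4; ++i) v.int32[i]   = (int32_t)color[i];
//     else                        for (int i = 0; i < 4; ++i) v.float32[i] = color[i];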
1757 
1758 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1759                                                  const GrBackendFormat& format,
1760                                                  GrRenderable renderable,
1761                                                  skgpu::Mipmapped mipmapped,
1762                                                  GrProtected isProtected,
1763                                                  std::string_view label) {
1764     const GrVkCaps& caps = this->vkCaps();
1765 
1766     if (fProtectedContext != isProtected) {
1767         return {};
1768     }
1769 
1770     VkFormat vkFormat;
1771     if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
1772         return {};
1773     }
1774 
1775     // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1776     if (!caps.isVkFormatTexturable(vkFormat)) {
1777         return {};
1778     }
1779 
1780     if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
1781         return {};
1782     }
1783 
1784     GrVkImageInfo info;
1785     if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1786                                               renderable, mipmapped, &info, isProtected)) {
1787         return {};
1788     }
1789 
1790     return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
1791 }
1792 
1793 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1794                                                            const GrBackendFormat& format,
1795                                                            skgpu::Mipmapped mipmapped,
1796                                                            GrProtected isProtected) {
1797     return this->onCreateBackendTexture(dimensions,
1798                                         format,
1799                                         GrRenderable::kNo,
1800                                         mipmapped,
1801                                         isProtected,
1802                                         /*label=*/"VkGpu_CreateCompressedBackendTexture");
1803 }
1804 
1805 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1806                                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
1807                                                const void* data,
1808                                                size_t size) {
1809     GrVkImageInfo info;
1810     SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1811 
1812     sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1813     SkASSERT(mutableState);
1814     sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1815                                                                  backendTexture.dimensions(),
1816                                                                  kBorrow_GrWrapOwnership,
1817                                                                  GrWrapCacheable::kNo,
1818                                                                  kRW_GrIOType,
1819                                                                  info,
1820                                                                  std::move(mutableState));
1821     if (!texture) {
1822         return false;
1823     }
1824 
1825     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1826     if (!cmdBuffer) {
1827         return false;
1828     }
1829     GrVkImage* image = texture->textureImage();
1830     image->setImageLayout(this,
1831                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1832                           VK_ACCESS_TRANSFER_WRITE_BIT,
1833                           VK_PIPELINE_STAGE_TRANSFER_BIT,
1834                           false);
1835 
1836     SkTextureCompressionType compression =
1837             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1838 
1839     TArray<VkBufferImageCopy> regions;
1840     TArray<size_t> individualMipOffsets;
1841     GrStagingBufferManager::Slice slice;
1842 
1843     fill_in_compressed_regions(&fStagingBufferManager,
1844                                &regions,
1845                                &individualMipOffsets,
1846                                &slice,
1847                                compression,
1848                                info.fFormat,
1849                                backendTexture.dimensions(),
1850                                backendTexture.fMipmapped);
1851 
1852     if (!slice.fBuffer) {
1853         return false;
1854     }
1855 
1856     memcpy(slice.fOffsetMapPtr, data, size);
1857 
1858     cmdBuffer->addGrSurface(texture);
1859     // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1860     // because we don't need the command buffer to ref the buffer here. The reason being is that
1861     // because we don't need the command buffer to ref the buffer here. The reason is that
1862     // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1863     // every upload in the frame.
1864     cmdBuffer->copyBufferToImage(this,
1865                                  static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1866                                  image,
1867                                  image->currentLayout(),
1868                                  regions.size(),
1869                                  regions.begin());
1870 
1871     // Change the image layout to shader read: when this texture is used as a borrowed
1872     // texture within Ganesh we require that its layout be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1873     image->setImageLayout(this,
1874                           VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1875                           VK_ACCESS_SHADER_READ_BIT,
1876                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1877                           false);
1878 
1879     if (finishedCallback) {
1880         this->addFinishedCallback(std::move(finishedCallback));
1881     }
1882     return true;
1883 }
1884 
1885 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1886                                              VkImageLayout newLayout,
1887                                              uint32_t newQueueFamilyIndex) {
1888     // Even though internally we use these helpers for getting src access flags and stages, they
1889     // can also be used for general dst flags since we don't know exactly what the client
1890     // plans on using the image for.
1891     if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1892         newLayout = image->currentLayout();
1893     }
1894     VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1895     VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1896 
1897     uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1898     auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1899         return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1900                queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1901     };
1902     if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1903         // It is illegal to have both the new and old queue be special queue families (i.e. external
1904         // or foreign).
1905         return;
1906     }
1907 
1908     image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1909                                        newQueueFamilyIndex);
1910 }
1911 
1912 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1913                                      sk_sp<skgpu::MutableTextureState> currentState,
1914                                      SkISize dimensions,
1915                                      VkImageLayout newLayout,
1916                                      uint32_t newQueueFamilyIndex,
1917                                      skgpu::MutableTextureState* previousState,
1918                                      sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1919     sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1920                                                       dimensions,
1921                                                       info,
1922                                                       std::move(currentState),
1923                                                       GrVkImage::UsageFlags::kColorAttachment,
1924                                                       kBorrow_GrWrapOwnership,
1925                                                       GrWrapCacheable::kNo,
1926                                                       "VkGpu_SetBackendSurfaceState",
1927                                                       /*forSecondaryCB=*/false);
1928     SkASSERT(texture);
1929     if (!texture) {
1930         return false;
1931     }
1932     if (previousState) {
1933         previousState->set(*texture->getMutableState());
1934     }
1935     set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
1936     if (finishedCallback) {
1937         this->addFinishedCallback(std::move(finishedCallback));
1938     }
1939     return true;
1940 }
1941 
1942 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1943                                      const skgpu::MutableTextureState& newState,
1944                                      skgpu::MutableTextureState* previousState,
1945                                      sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1946     GrVkImageInfo info;
1947     SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1948     sk_sp<skgpu::MutableTextureState> currentState = backendTexture.getMutableState();
1949     SkASSERT(currentState);
1950     SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1951     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1952                                         skgpu::MutableTextureStates::GetVkImageLayout(newState),
1953                                         skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1954                                         previousState,
1955                                         std::move(finishedCallback));
1956 }
1957 
1958 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1959                                           const skgpu::MutableTextureState& newState,
1960                                           skgpu::MutableTextureState* previousState,
1961                                           sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1962     GrVkImageInfo info;
1963     SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
1964     sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1965     SkASSERT(currentState);
1966     SkASSERT(newState.backend() == skgpu::BackendApi::kVulkan);
1967     return this->setBackendSurfaceState(info, std::move(currentState),
1968                                         backendRenderTarget.dimensions(),
1969                                         skgpu::MutableTextureStates::GetVkImageLayout(newState),
1970                                         skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1971                                         previousState, std::move(finishedCallback));
1972 }
1973 
1974 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
1975     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1976     VkPipelineStageFlags dstStage;
1977     VkAccessFlags dstAccess;
1978     if (barrierType == kBlend_GrXferBarrierType) {
1979         dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1980         dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
1981     } else {
1982         SkASSERT(barrierType == kTexture_GrXferBarrierType);
1983         dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1984         dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1985     }
1986     GrVkImage* image = vkRT->colorAttachment();
1987     VkImageMemoryBarrier barrier;
1988     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1989     barrier.pNext = nullptr;
1990     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1991     barrier.dstAccessMask = dstAccess;
1992     barrier.oldLayout = image->currentLayout();
1993     barrier.newLayout = barrier.oldLayout;
1994     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1995     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1996     barrier.image = image->image();
1997     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
1998     this->addImageMemoryBarrier(image->resource(),
1999                                 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2000                                 dstStage, true, &barrier);
2001 }
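// Note that the barrier above uses oldLayout == newLayout: it is a pure
// execution/memory dependency (a render pass self-dependency, hence
// byRegion == true), not a layout transition, so attachment contents are
// preserved while prior color writes are made visible to the blend or input
// attachment reads that follow.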
2002 
2003 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2004     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2005 
2006     GrVkImageInfo info;
2007     if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2008         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2009     }
2010 }
2011 
2012 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2013     GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2014     GrVkRenderPass::AttachmentFlags attachmentFlags;
2015     GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2016                                                        &attachmentsDescriptor, &attachmentFlags);
2017 
2018     GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2019     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2020         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2021     }
2022     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2023         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2024     }
2025 
2026     GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2027     if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2028         programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2029         loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2030     }
2031     sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2032             &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2033     if (!renderPass) {
2034         return false;
2035     }
2036 
2037     GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2038 
2039     auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2040                                     desc,
2041                                     programInfo,
2042                                     renderPass->vkRenderPass(),
2043                                     &stat);
2044     if (!pipelineState) {
2045         return false;
2046     }
2047 
2048     return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2049 }
2050 
2051 #if defined(GPU_TEST_UTILS)
2052 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2053     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2054 
2055     GrVkImageInfo backend;
2056     if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
2057         return false;
2058     }
2059 
2060     if (backend.fImage && backend.fAlloc.fMemory) {
2061         VkMemoryRequirements req;
2062         memset(&req, 0, sizeof(req));
2063         GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2064                                                                    backend.fImage,
2065                                                                    &req));
2066         // TODO: find a better check
2067         // This will probably fail with a different driver
2068         return (req.size > 0) && (req.size <= 8192 * 8192);
2069     }
2070 
2071     return false;
2072 }
2073 
2074 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2075                                                                     GrColorType ct,
2076                                                                     int sampleCnt,
2077                                                                     GrProtected isProtected) {
2078     if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
2079         dimensions.height() > this->caps()->maxRenderTargetSize()) {
2080         return {};
2081     }
2082 
2083     VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2084 
2085     GrVkImageInfo info;
2086     if (!this->createVkImageForBackendSurface(vkFormat,
2087                                               dimensions,
2088                                               sampleCnt,
2089                                               GrTexturable::kNo,
2090                                               GrRenderable::kYes,
2091                                               skgpu::Mipmapped::kNo,
2092                                               &info,
2093                                               isProtected)) {
2094         return {};
2095     }
2096     return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
2097 }
2098 
2099 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2100     SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2101 
2102     GrVkImageInfo info;
2103     if (GrBackendRenderTargets::GetVkImageInfo(rt, &info)) {
2104         // something in the command buffer may still be using this, so force submit
2105         GrSubmitInfo submitInfo;
2106         submitInfo.fSync = GrSyncCpu::kYes;
2107         SkAssertResult(this->submitCommandBuffer(submitInfo));
2108         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2109     }
2110 }
2111 #endif
2112 
2113 ////////////////////////////////////////////////////////////////////////////////
2114 
2115 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2116                                      VkPipelineStageFlags srcStageMask,
2117                                      VkPipelineStageFlags dstStageMask,
2118                                      bool byRegion,
2119                                      VkBufferMemoryBarrier* barrier) const {
2120     if (!this->currentCommandBuffer()) {
2121         return;
2122     }
2123     SkASSERT(resource);
2124     this->currentCommandBuffer()->pipelineBarrier(this,
2125                                                   resource,
2126                                                   srcStageMask,
2127                                                   dstStageMask,
2128                                                   byRegion,
2129                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
2130                                                   barrier);
2131 }
2132 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2133                                      VkPipelineStageFlags dstStageMask,
2134                                      bool byRegion,
2135                                      VkBufferMemoryBarrier* barrier) const {
2136     if (!this->currentCommandBuffer()) {
2137         return;
2138     }
2139     // We don't pass a resource to the command buffer here. The command buffer is only using it
2140     // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2141     // command with the buffer on the command buffer. Thus those other commands will already cause
2142     // the command buffer to be holding a ref to the buffer.
2143     this->currentCommandBuffer()->pipelineBarrier(this,
2144                                                   /*resource=*/nullptr,
2145                                                   srcStageMask,
2146                                                   dstStageMask,
2147                                                   byRegion,
2148                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
2149                                                   barrier);
2150 }
2151 
2152 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2153                                     VkPipelineStageFlags srcStageMask,
2154                                     VkPipelineStageFlags dstStageMask,
2155                                     bool byRegion,
2156                                     VkImageMemoryBarrier* barrier) const {
2157     // If we are in the middle of destroying or abandoning the context we may hit a release proc
2158     // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2159     // VkImage back to the original queue. In this state we don't submit any more work and we may not
2160     // have a current command buffer. Thus we won't do the queue transfer.
2161     if (!this->currentCommandBuffer()) {
2162         return;
2163     }
2164     SkASSERT(resource);
2165     this->currentCommandBuffer()->pipelineBarrier(this,
2166                                                   resource,
2167                                                   srcStageMask,
2168                                                   dstStageMask,
2169                                                   byRegion,
2170                                                   GrVkCommandBuffer::kImageMemory_BarrierType,
2171                                                   barrier);
2172 }
2173 
2174 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2175         SkSpan<GrSurfaceProxy*> proxies,
2176         SkSurfaces::BackendSurfaceAccess access,
2177         const skgpu::MutableTextureState* newState) {
2178     // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2179     // not affect what we do here.
2180     if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) {
2181         // We currently don't support passing in new surface state for multiple proxies here. The
2182         // only time we have multiple proxies is if we are flushing a yuv SkImage, which won't have
2183         // state updates anyway. Additionally, if we have a newState then we must not have any
2184         // BackendSurfaceAccess.
2185         SkASSERT(!newState || proxies.size() == 1);
2186         SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess);
2187         GrVkImage* image;
2188         for (GrSurfaceProxy* proxy : proxies) {
2189             SkASSERT(proxy->isInstantiated());
2190             if (GrTexture* tex = proxy->peekTexture()) {
2191                 image = static_cast<GrVkTexture*>(tex)->textureImage();
2192             } else {
2193                 GrRenderTarget* rt = proxy->peekRenderTarget();
2194                 SkASSERT(rt);
2195                 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2196                 image = vkRT->externalAttachment();
2197             }
2198             if (newState) {
2199                 VkImageLayout newLayout =
2200                     skgpu::MutableTextureStates::GetVkImageLayout(newState);
2201                 uint32_t newIndex =
2202                     skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
2203                 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2204             } else {
2205                 SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
2206                 image->prepareForPresent(this);
2207             }
2208         }
2209     }
2210 }
2211 
2212 void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2213     SkASSERT(finishedCallback);
2214     fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2215 }
2216 
2217 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2218     this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2219 }
2220 
2221 bool GrVkGpu::onSubmitToGpu(const GrSubmitInfo& info) {
2222     return this->submitCommandBuffer(info);
2223 }
2224 
2225 void GrVkGpu::finishOutstandingGpuWork() {
2226     VK_CALL(QueueWaitIdle(fQueue));
2227 
2228     if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2229         fResourceProvider.forceSyncAllCommandBuffers();
2230     }
2231 }
2232 
2233 void GrVkGpu::onReportSubmitHistograms() {
2234 #if SK_HISTOGRAMS_ENABLED
2235     uint64_t allocatedMemory = 0, usedMemory = 0;
2236     std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2237     SkASSERT(usedMemory <= allocatedMemory);
2238     if (allocatedMemory > 0) {
2239         SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2240                                 (usedMemory * 100) / allocatedMemory);
2241     }
2242     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2243     // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
2244     SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2245 #endif  // SK_HISTOGRAMS_ENABLED
2246 }
2247 
2248 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2249                                      GrSurface* src,
2250                                      GrVkImage* dstImage,
2251                                      GrVkImage* srcImage,
2252                                      const SkIRect& srcRect,
2253                                      const SkIPoint& dstPoint) {
2254     if (!this->currentCommandBuffer()) {
2255         return;
2256     }
2257 
2258 #ifdef SK_DEBUG
2259     int dstSampleCnt = dstImage->numSamples();
2260     int srcSampleCnt = srcImage->numSamples();
2261     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2262     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2263     VkFormat dstFormat = dstImage->imageFormat();
2264     VkFormat srcFormat;
2265     SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2266     SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2267                                          srcFormat, srcSampleCnt, srcHasYcbcr));
2268 #endif
2269     if (src->isProtected() && !dst->isProtected()) {
2270         SkDebugf("Can't copy from protected memory to non-protected\n");
2271         return;
2272     }
2273 
2274     // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
2275     // the cache is flushed, since it is only being written to.
2276     dstImage->setImageLayout(this,
2277                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2278                              VK_ACCESS_TRANSFER_WRITE_BIT,
2279                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2280                              false);
2281 
2282     srcImage->setImageLayout(this,
2283                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2284                              VK_ACCESS_TRANSFER_READ_BIT,
2285                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2286                              false);
2287 
2288     VkImageCopy copyRegion;
2289     memset(&copyRegion, 0, sizeof(VkImageCopy));
2290     copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2291     copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2292     copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2293     copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2294     copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2295 
2296     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2297     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2298     this->currentCommandBuffer()->copyImage(this,
2299                                             srcImage,
2300                                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2301                                             dstImage,
2302                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2303                                             1,
2304                                             &copyRegion);
2305 
2306     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2307                                         srcRect.width(), srcRect.height());
2308     // The rect is already in device space, so we pass kTopLeft and no flip is done.
2309     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2310 }
2311 
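// GrVkCommandBuffer::copyImage is a thin wrapper over the raw Vulkan command; a sketch of
// what it ultimately records, assuming GrVkImage::image() returns the VkImage handle and
// `cmdBuffer` is the current VkCommandBuffer.
#if 0
static void exampleRecordCopy(VkCommandBuffer cmdBuffer, GrVkImage* srcImage,
                              GrVkImage* dstImage, const VkImageCopy& copyRegion) {
    vkCmdCopyImage(cmdBuffer,
                   srcImage->image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                   dstImage->image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                   1, &copyRegion);
}
#endif
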
2312 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2313                                 GrSurface* src,
2314                                 GrVkImage* dstImage,
2315                                 GrVkImage* srcImage,
2316                                 const SkIRect& srcRect,
2317                                 const SkIRect& dstRect,
2318                                 GrSamplerState::Filter filter) {
2319     if (!this->currentCommandBuffer()) {
2320         return;
2321     }
2322 
2323 #ifdef SK_DEBUG
2324     int dstSampleCnt = dstImage->numSamples();
2325     int srcSampleCnt = srcImage->numSamples();
2326     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2327     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2328     VkFormat dstFormat = dstImage->imageFormat();
2329     VkFormat srcFormat;
2330     SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2331     SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2332                                           dstSampleCnt,
2333                                           dstImage->isLinearTiled(),
2334                                           dstHasYcbcr,
2335                                           srcFormat,
2336                                           srcSampleCnt,
2337                                           srcImage->isLinearTiled(),
2338                                           srcHasYcbcr));
2339 
2340 #endif
2341     if (src->isProtected() && !dst->isProtected()) {
2342         SkDebugf("Can't copy from protected memory to non-protected\n");
2343         return;
2344     }
2345 
2346     dstImage->setImageLayout(this,
2347                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2348                              VK_ACCESS_TRANSFER_WRITE_BIT,
2349                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2350                              false);
2351 
2352     srcImage->setImageLayout(this,
2353                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2354                              VK_ACCESS_TRANSFER_READ_BIT,
2355                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2356                              false);
2357 
2358     VkImageBlit blitRegion;
2359     memset(&blitRegion, 0, sizeof(VkImageBlit));
2360     blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2361     blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2362     blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2363     blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2364     blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2365     blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2366 
2367     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2368     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2369     this->currentCommandBuffer()->blitImage(this,
2370                                             *srcImage,
2371                                             *dstImage,
2372                                             1,
2373                                             &blitRegion,
2374                                             filter == GrSamplerState::Filter::kNearest ?
2375                                                     VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2376 
2377     // The rect is already in device space, so we pass kTopLeft and no flip is done.
2378     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2379 }
2380 
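// As above, blitImage wraps the raw command; a sketch of the recorded call with the same
// assumed handles. Unlike vkCmdCopyImage, the blit takes a VkFilter because the source and
// destination rectangles may differ in size.
#if 0
static void exampleRecordBlit(VkCommandBuffer cmdBuffer, GrVkImage* srcImage,
                              GrVkImage* dstImage, const VkImageBlit& blitRegion) {
    vkCmdBlitImage(cmdBuffer,
                   srcImage->image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                   dstImage->image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                   1, &blitRegion,
                   VK_FILTER_LINEAR);
}
#endif
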
2381 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2382                                    const SkIPoint& dstPoint) {
2383     if (src->isProtected() && !dst->isProtected()) {
2384         SkDebugf("Can't copy from protected memory to non-protected\n");
2385         return;
2386     }
2387     GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2388     this->resolveImage(dst, srcRT, srcRect, dstPoint);
2389     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2390                                         srcRect.width(), srcRect.height());
2391     // The rect is already in device space, so we pass kTopLeft and no flip is done.
2392     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2393 }
2394 
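// resolveImage() ultimately records a vkCmdResolveImage with a VkImageResolve region; a
// minimal sketch of how such a region would be filled from srcRect/dstPoint.
#if 0
static VkImageResolve exampleResolveRegion(const SkIRect& srcRect, const SkIPoint& dstPoint) {
    VkImageResolve resolveRegion;
    memset(&resolveRegion, 0, sizeof(VkImageResolve));
    resolveRegion.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveRegion.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveRegion.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
    return resolveRegion;
}
#endif
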
2395 bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2396                             GrSurface* src, const SkIRect& srcRect,
2397                             GrSamplerState::Filter filter) {
2398 #ifdef SK_DEBUG
2399     if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2400         SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2401     }
2402     if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2403         SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2404     }
2405 #endif
2406     if (src->isProtected() && !dst->isProtected()) {
2407         SkDebugf("Can't copy from protected memory to non-protected\n");
2408         return false;
2409     }
2410 
2411     GrVkImage* dstImage;
2412     GrVkImage* srcImage;
2413     GrRenderTarget* dstRT = dst->asRenderTarget();
2414     if (dstRT) {
2415         GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2416         if (vkRT->wrapsSecondaryCommandBuffer()) {
2417             return false;
2418         }
2419         // This will technically return true for single sample RTs that used DMSAA, in which case
2420         // we don't have to pick the resolve attachment. But in that case the resolve and color
2421         // attachments will be the same anyway.
2422         if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2423             dstImage = vkRT->resolveAttachment();
2424         } else {
2425             dstImage = vkRT->colorAttachment();
2426         }
2427     } else if (dst->asTexture()) {
2428         dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2429     } else {
2430         // The surface is a GrAttachment already.
2431         dstImage = static_cast<GrVkImage*>(dst);
2432     }
2433     GrRenderTarget* srcRT = src->asRenderTarget();
2434     if (srcRT) {
2435         GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2436         // This will technically return true for single sample RTs that used DMSAA, in which case
2437         // we don't have to pick the resolve attachment. But in that case the resolve and color
2438         // attachments will be the same anyway.
2439         if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2440             srcImage = vkRT->resolveAttachment();
2441         } else {
2442             srcImage = vkRT->colorAttachment();
2443         }
2444     } else if (src->asTexture()) {
2445         SkASSERT(src->asTexture());
2446         srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2447     } else {
2448         // The surface is a GrAttachment already.
2449         srcImage = static_cast<GrVkImage*>(src);
2450     }
2451 
2452     VkFormat dstFormat = dstImage->imageFormat();
2453     VkFormat srcFormat = srcImage->imageFormat();
2454 
2455     int dstSampleCnt = dstImage->numSamples();
2456     int srcSampleCnt = srcImage->numSamples();
2457 
2458     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2459     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2460 
2461     if (srcRect.size() == dstRect.size()) {
2462         // Prefer resolves or copy-image commands when there is no scaling
2463         const SkIPoint dstPoint = dstRect.topLeft();
2464         if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2465                                             srcFormat, srcSampleCnt, srcHasYcbcr)) {
2466             this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2467             return true;
2468         }
2469 
2470         if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2471                                         srcFormat, srcSampleCnt, srcHasYcbcr)) {
2472             this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2473             return true;
2474         }
2475     }
2476 
2477     if (this->vkCaps().canCopyAsBlit(dstFormat,
2478                                      dstSampleCnt,
2479                                      dstImage->isLinearTiled(),
2480                                      dstHasYcbcr,
2481                                      srcFormat,
2482                                      srcSampleCnt,
2483                                      srcImage->isLinearTiled(),
2484                                      srcHasYcbcr)) {
2485         this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2486         return true;
2487     }
2488 
2489     return false;
2490 }
2491 
2492 bool GrVkGpu::onReadPixels(GrSurface* surface,
2493                            SkIRect rect,
2494                            GrColorType surfaceColorType,
2495                            GrColorType dstColorType,
2496                            void* buffer,
2497                            size_t rowBytes) {
2498     if (surface->isProtected()) {
2499         return false;
2500     }
2501 
2502     if (!this->currentCommandBuffer()) {
2503         return false;
2504     }
2505 
2506     GrVkImage* image = nullptr;
2507     GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2508     if (rt) {
2509         // Reading from render targets that wrap a secondary command buffer is not allowed, since
2510         // it would require us to know the VkImage, which we don't have, and would require us to
2511         // stop and restart the VkRenderPass, which we don't have access to.
2512         if (rt->wrapsSecondaryCommandBuffer()) {
2513             return false;
2514         }
2515         image = rt->nonMSAAAttachment();
2516     } else {
2517         image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2518     }
2519 
2520     if (!image) {
2521         return false;
2522     }
2523 
2524     if (dstColorType == GrColorType::kUnknown ||
2525         dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2526         return false;
2527     }
2528 
2529     // Change the layout of our target so it can be used as a copy source.
2530     image->setImageLayout(this,
2531                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2532                           VK_ACCESS_TRANSFER_READ_BIT,
2533                           VK_PIPELINE_STAGE_TRANSFER_BIT,
2534                           false);
2535 
2536     size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2537     if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2538         return false;
2539     }
2540     size_t tightRowBytes = bpp*rect.width();
2541 
2542     VkBufferImageCopy region;
2543     memset(&region, 0, sizeof(VkBufferImageCopy));
2544     VkOffset3D offset = { rect.left(), rect.top(), 0 };
2545     region.imageOffset = offset;
2546     region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2547 
2548     size_t transBufferRowBytes = bpp * region.imageExtent.width;
2549     size_t imageRows = region.imageExtent.height;
2550     GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2551     sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2552             transBufferRowBytes * imageRows,
2553             GrGpuBufferType::kXferGpuToCpu,
2554             kDynamic_GrAccessPattern,
2555             GrResourceProvider::ZeroInit::kNo);
2556 
2557     if (!transferBuffer) {
2558         return false;
2559     }
2560 
2561     GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2562 
2563     // Copy the image to a buffer so we can map it to cpu memory
2564     region.bufferOffset = 0;
2565     region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2566     region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2567     region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2568 
2569     this->currentCommandBuffer()->copyImageToBuffer(this,
2570                                                     image,
2571                                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2572                                                     transferBuffer,
2573                                                     1,
2574                                                     &region);
2575 
2576     // make sure the copy to buffer has finished
2577     vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2578                                VK_ACCESS_HOST_READ_BIT,
2579                                VK_PIPELINE_STAGE_TRANSFER_BIT,
2580                                VK_PIPELINE_STAGE_HOST_BIT,
2581                                false);
2582 
2583     // We need to submit the current command buffer to the Queue and make sure it finishes before
2584     // we can copy the data out of the buffer.
2585     GrSubmitInfo submitInfo;
2586     submitInfo.fSync = GrSyncCpu::kYes;
2587     if (!this->submitCommandBuffer(submitInfo)) {
2588         return false;
2589     }
2590     void* mappedMemory = transferBuffer->map();
2591     if (!mappedMemory) {
2592         return false;
2593     }
2594 
2595     SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2596 
2597     transferBuffer->unmap();
2598     return true;
2599 }
2600 
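// SkRectMemcpy above handles the differing row pitches of the mapped transfer buffer and
// the caller's destination; it is equivalent to a row-by-row copy like this sketch
// (exampleCopyRows is a hypothetical helper).
#if 0
static void exampleCopyRows(void* dst, size_t dstRowBytes,
                            const void* src, size_t srcRowBytes,
                            size_t tightRowBytes, int rowCount) {
    for (int y = 0; y < rowCount; ++y) {
        memcpy(static_cast<char*>(dst) + y * dstRowBytes,
               static_cast<const char*>(src) + y * srcRowBytes,
               tightRowBytes);  // copy only the meaningful bytes of each row
    }
}
#endif
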
2601 bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2602                               sk_sp<const GrVkFramebuffer> framebuffer,
2603                               const VkClearValue* colorClear,
2604                               const GrSurface* target,
2605                               const SkIRect& renderPassBounds,
2606                               bool forSecondaryCB) {
2607     if (!this->currentCommandBuffer()) {
2608         return false;
2609     }
2610     SkASSERT(!framebuffer->isExternal());
2611 
2612 #ifdef SK_DEBUG
2613     uint32_t index;
2614     bool result = renderPass->colorAttachmentIndex(&index);
2615     SkASSERT(result && 0 == index);
2616     result = renderPass->stencilAttachmentIndex(&index);
2617     if (result) {
2618         SkASSERT(1 == index);
2619     }
2620 #endif
2621     VkClearValue clears[3];
2622     int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2623     clears[0].color = colorClear->color;
2624     clears[stencilIndex].depthStencil.depth = 0.0f;
2625     clears[stencilIndex].depthStencil.stencil = 0;
2626 
2627     return this->currentCommandBuffer()->beginRenderPass(
2628         this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2629 }
2630 
2631 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2632                             const SkIRect& bounds) {
2633     // We had a command buffer when we started the render pass, we should have one now as well.
2634     SkASSERT(this->currentCommandBuffer());
2635     this->currentCommandBuffer()->endRenderPass(this);
2636     this->didWriteToSurface(target, origin, &bounds);
2637 }
2638 
2639 bool GrVkGpu::checkVkResult(VkResult result) {
2640     switch (result) {
2641         case VK_SUCCESS:
2642             return true;
2643         case VK_ERROR_DEVICE_LOST:
2644             if (!fDeviceIsLost) {
2645                 // Callback should only be invoked once, and device should be marked as lost first.
2646                 fDeviceIsLost = true;
2647                 skgpu::InvokeDeviceLostCallback(vkInterface(),
2648                                                 device(),
2649                                                 fDeviceLostContext,
2650                                                 fDeviceLostProc,
2651                                                 vkCaps().supportsDeviceFaultInfo());
2652             }
2653             return false;
2654         case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2655         case VK_ERROR_OUT_OF_HOST_MEMORY:
2656             this->setOOMed();
2657             return false;
2658         default:
2659             return false;
2660     }
2661 }
2662 
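// A sketch of the usual call pattern feeding checkVkResult(), assuming the
// GR_VK_CALL_RESULT helper from GrVkUtil.h routes the result through it; `createInfo` and
// `fence` are assumed stand-ins.
#if 0
static bool exampleCheckedCall(GrVkGpu* gpu, const VkFenceCreateInfo& createInfo,
                               VkFence* fence) {
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateFence(gpu->device(), &createInfo, nullptr, fence));
    // checkVkResult() (invoked via the macro) latches device-lost / OOM state exactly once.
    return result == VK_SUCCESS;
}
#endif
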
2663 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2664     if (!this->currentCommandBuffer()) {
2665         return;
2666     }
2667     this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2668 }
2669 
2670 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2671     SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2672 
2673     fCachedOpsRenderPass->submit();
2674     fCachedOpsRenderPass->reset();
2675 }
2676 
2677 [[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
2678     return GrVkSemaphore::Make(this, isOwned);
2679 }
2680 
2681 std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2682                                                            GrSemaphoreWrapType wrapType,
2683                                                            GrWrapOwnership ownership) {
2684     return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
2685                                       wrapType, ownership);
2686 }
2687 
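// A minimal sketch of wrapping an externally created VkSemaphore for a wait, assuming
// GrBackendSemaphores::MakeVk from GrVkBackendSemaphore.h; `gpu` and `vkSemaphore` are
// assumed stand-ins.
#if 0
static std::unique_ptr<GrSemaphore> exampleWrapForWait(GrVkGpu* gpu, VkSemaphore vkSemaphore) {
    GrBackendSemaphore backendSem = GrBackendSemaphores::MakeVk(vkSemaphore);
    return gpu->wrapBackendSemaphore(backendSem,
                                     GrSemaphoreWrapType::kWillWait,
                                     kBorrow_GrWrapOwnership);
}
#endif
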
2688 void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2689     SkASSERT(semaphore);
2690 
2691     GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2692 
2693     GrVkSemaphore::Resource* resource = vkSem->getResource();
2694     if (resource->shouldSignal()) {
2695         resource->ref();
2696         fSemaphoresToSignal.push_back(resource);
2697     }
2698 }
2699 
2700 void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2701     SkASSERT(semaphore);
2702 
2703     GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2704 
2705     GrVkSemaphore::Resource* resource = vkSem->getResource();
2706     if (resource->shouldWait()) {
2707         resource->ref();
2708         fSemaphoresToWaitOn.push_back(resource);
2709     }
2710 }
2711 
2712 std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2713     SkASSERT(texture);
2714     GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2715     vkTexture->setImageLayout(this,
2716                               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2717                               VK_ACCESS_SHADER_READ_BIT,
2718                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2719                               false);
2720     // TODO: should we have a way to notify the caller that this has failed? Currently, if the
2721     // submit fails (e.g. due to DEVICE_LOST), this will just cause us to fail the next use of the
2722     // gpu. Eventually we will abandon the whole GPU if this fails.
2723     this->submitToGpu();
2724 
2725     // The image layout change serves as a barrier, so no semaphore is needed.
2726     // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2727     // thread safe so that only the first thread that tries to use the semaphore actually submits
2728     // it. This would also require thread safety in command buffer submissions to queues in
2729     // general.
2730     return nullptr;
2731 }
2732 
2733 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2734     fDrawables.emplace_back(std::move(drawable));
2735 }
2736 
2737 void GrVkGpu::storeVkPipelineCacheData() {
2738     if (this->getContext()->priv().getPersistentCache()) {
2739         this->resourceProvider().storePipelineCacheData();
2740     }
2741 }
2742
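// storePipelineCacheData() writes into the client-supplied
// GrContextOptions::PersistentCache. A minimal in-memory implementation sketch, assuming
// the two-argument load/store overloads; InMemoryCache is a hypothetical example class.
#if 0
class InMemoryCache : public GrContextOptions::PersistentCache {
public:
    sk_sp<SkData> load(const SkData& key) override {
        auto it = fMap.find(asString(key));
        return it == fMap.end() ? nullptr : it->second;
    }
    void store(const SkData& key, const SkData& data) override {
        fMap[asString(key)] = SkData::MakeWithCopy(data.data(), data.size());
    }
private:
    static std::string asString(const SkData& d) {
        return std::string(static_cast<const char*>(d.data()), d.size());
    }
    std::unordered_map<std::string, sk_sp<SkData>> fMap;
};
#endif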