/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanTexture.h"

#include "include/gpu/MutableTextureState.h"
#include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/vk/VulkanCaps.h"
#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteTypesPriv.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanMutableTextureStatePriv.h"

namespace skgpu::graphite {

bool VulkanTexture::MakeVkImage(const VulkanSharedContext* sharedContext,
                                SkISize dimensions,
                                const TextureInfo& info,
                                CreatedImageInfo* outInfo) {
    SkASSERT(outInfo);
    const VulkanCaps& caps = sharedContext->vulkanCaps();

    if (dimensions.isEmpty()) {
        SKGPU_LOG_E("Tried to create VkImage with empty dimensions.");
        return false;
    }
    if (dimensions.width() > caps.maxTextureSize() ||
        dimensions.height() > caps.maxTextureSize()) {
        SKGPU_LOG_E("Tried to create VkImage with too large a size.");
        return false;
    }

    if ((info.isProtected() == Protected::kYes) != caps.protectedSupport()) {
        SKGPU_LOG_E("Tried to create %s VkImage in %s Context.",
                    info.isProtected() == Protected::kYes ? "protected" : "unprotected",
                    caps.protectedSupport() ? "protected" : "unprotected");
        return false;
    }

    const VulkanTextureSpec spec = TextureInfos::GetVulkanTextureSpec(info);

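    // Linear images must start in VK_IMAGE_LAYOUT_PREINITIALIZED so that any data written by
    // the host before the first layout transition is preserved; optimally-tiled images have no
    // host-visible contents to preserve and can start in VK_IMAGE_LAYOUT_UNDEFINED.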
    bool isLinear = spec.fImageTiling == VK_IMAGE_TILING_LINEAR;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!SampleCountToVkSampleCount(info.numSamples(), &vkSamples)) {
        SKGPU_LOG_E("Failed creating VkImage because we could not convert the number of samples: "
                    "%u to a VkSampleCountFlagBits.", info.numSamples());
        return false;
    }

    SkASSERT(!isLinear || vkSamples == VK_SAMPLE_COUNT_1_BIT);

    VkImageCreateFlags createflags = 0;
    if (info.isProtected() == Protected::kYes && caps.protectedSupport()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }

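    // A full mipmap chain has SkMipmap::ComputeLevelCount() levels beyond the base level, so
    // add one to include the base level itself.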
    uint32_t numMipLevels = 1;
    if (info.mipmapped() == Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    uint32_t width = static_cast<uint32_t>(dimensions.fWidth);
    uint32_t height = static_cast<uint32_t>(dimensions.fHeight);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,        // sType
        nullptr,                                    // pNext
        createflags,                                // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                           // VkImageType
        spec.fFormat,                               // VkFormat
        { width, height, 1 },                       // VkExtent3D
        numMipLevels,                               // mipLevels
        1,                                          // arrayLayers
        vkSamples,                                  // samples
        spec.fImageTiling,                          // VkImageTiling
        spec.fImageUsageFlags,                      // VkImageUsageFlags
        spec.fSharingMode,                          // VkSharingMode
        0,                                          // queueFamilyCount
        nullptr,                                    // pQueueFamilyIndices
        initialLayout                               // initialLayout
    };

    auto device = sharedContext->device();

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    VULKAN_CALL_RESULT(
            sharedContext, result, CreateImage(device, &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        SKGPU_LOG_E("Failed call to vkCreateImage with error: %d", result);
        return false;
    }

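    // Allocate device memory for the image and bind it. Transient attachments request lazily
    // allocated memory, which lets tile-based GPUs avoid ever backing the image with physical
    // memory when its contents stay on-chip for the lifetime of a render pass.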
    auto allocator = sharedContext->memoryAllocator();
    bool forceDedicatedMemory = caps.shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(spec.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [sharedContext](VkResult result) {
        return sharedContext->checkVkResult(result);
    };
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               image,
                                               info.isProtected(),
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &outInfo->fMemoryAlloc)) {
        VULKAN_CALL(sharedContext->interface(), DestroyImage(device, image, nullptr));
        return false;
    }

    if (useLazyAllocation &&
        !SkToBool(outInfo->fMemoryAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag)) {
        SKGPU_LOG_E("Failed to allocate lazy Vulkan memory when requested");
        skgpu::VulkanMemory::FreeImageMemory(allocator, outInfo->fMemoryAlloc);
        return false;
    }

    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            BindImageMemory(
                    device, image, outInfo->fMemoryAlloc.fMemory, outInfo->fMemoryAlloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, outInfo->fMemoryAlloc);
        VULKAN_CALL(sharedContext->interface(), DestroyImage(device, image, nullptr));
        return false;
    }

    outInfo->fImage = image;
    outInfo->fMutableState = sk_make_sp<MutableTextureState>(
            skgpu::MutableTextureStates::MakeVulkan(initialLayout, VK_QUEUE_FAMILY_IGNORED));
    return true;
}

sk_sp<Texture> VulkanTexture::Make(const VulkanSharedContext* sharedContext,
                                   SkISize dimensions,
                                   const TextureInfo& info,
                                   skgpu::Budgeted budgeted,
                                   sk_sp<VulkanYcbcrConversion> ycbcrConversion) {
    CreatedImageInfo imageInfo;
    if (!MakeVkImage(sharedContext, dimensions, info, &imageInfo)) {
        return nullptr;
    }

    return sk_sp<Texture>(new VulkanTexture(sharedContext,
                                            dimensions,
                                            info,
                                            std::move(imageInfo.fMutableState),
                                            imageInfo.fImage,
                                            imageInfo.fMemoryAlloc,
                                            Ownership::kOwned,
                                            budgeted,
                                            std::move(ycbcrConversion)));
}

sk_sp<Texture> VulkanTexture::MakeWrapped(const VulkanSharedContext* sharedContext,
                                          SkISize dimensions,
                                          const TextureInfo& info,
                                          sk_sp<MutableTextureState> mutableState,
                                          VkImage image,
                                          const VulkanAlloc& alloc,
                                          sk_sp<VulkanYcbcrConversion> ycbcrConversion) {
    return sk_sp<Texture>(new VulkanTexture(sharedContext,
                                            dimensions,
                                            info,
                                            std::move(mutableState),
                                            image,
                                            alloc,
                                            Ownership::kWrapped,
                                            skgpu::Budgeted::kNo,
                                            std::move(ycbcrConversion)));
}

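// Map a VkFormat to the image aspect(s) a barrier or view of that format must reference:
// stencil-only, depth-only, combined depth/stencil, or color for everything else.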
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D16_UNORM:
            [[fallthrough]];
        case VK_FORMAT_D32_SFLOAT:
            return VK_IMAGE_ASPECT_DEPTH_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:
            [[fallthrough]];
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

void VulkanTexture::setImageLayoutAndQueueIndex(VulkanCommandBuffer* cmdBuffer,
                                                VkImageLayout newLayout,
                                                VkAccessFlags dstAccessMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                uint32_t newQueueFamilyIndex) const {

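    // An image can be transitioned out of VK_IMAGE_LAYOUT_UNDEFINED or
    // VK_IMAGE_LAYOUT_PREINITIALIZED but never back into either, so a new layout of either
    // value is only valid when it matches the current layout (i.e. no transition at all).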
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

    VulkanTextureInfo textureInfo;
    SkAssertResult(TextureInfos::GetVulkanTextureInfo(this->textureInfo(), &textureInfo));
    auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());

    // Enable the following block on new devices to test that their lazy images stay at 0 memory use
#if 0
    auto device = sharedContext->device();
    if (fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VULKAN_CALL(sharedContext->interface(),
                    GetDeviceMemoryCommitment(device, fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fImage, size);
    }
#endif
#ifdef SK_DEBUG
    if (textureInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(textureInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == sharedContext->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == sharedContext->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == sharedContext->queueIndex());
        }
    }
#endif

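    // For exclusive-mode images, VK_QUEUE_FAMILY_IGNORED simply means "the queue this context
    // owns", so resolve it to the actual queue index before building the barrier.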
    if (textureInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = sharedContext->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = sharedContext->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need
    // to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = VulkanTexture::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = VulkanTexture::LayoutToPipelineSrcStageFlags(currentLayout);

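    // The barrier must cover the whole image: every aspect implied by the format and all mip
    // levels of the single array layer.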
    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(textureInfo.fFormat);
    uint32_t numMipLevels = 1;
    SkISize dimensions = this->dimensions();
    if (this->mipmapped() == Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
        nullptr,                                    // pNext
        srcAccessMask,                              // srcAccessMask
        dstAccessMask,                              // dstAccessMask
        currentLayout,                              // oldLayout
        newLayout,                                  // newLayout
        currentQueueIndex,                          // srcQueueFamilyIndex
        newQueueFamilyIndex,                        // dstQueueFamilyIndex
        fImage,                                     // image
        { aspectFlags, 0, numMipLevels, 0, 1 }      // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    cmdBuffer->addImageMemoryBarrier(this, srcStageMask, dstStageMask, byRegion,
                                     &imageMemoryBarrier);

    skgpu::MutableTextureStates::SetVkImageLayout(this->mutableState(), newLayout);
    skgpu::MutableTextureStates::SetVkQueueFamilyIndex(this->mutableState(), newQueueFamilyIndex);
}

VulkanTexture::VulkanTexture(const VulkanSharedContext* sharedContext,
                             SkISize dimensions,
                             const TextureInfo& info,
                             sk_sp<MutableTextureState> mutableState,
                             VkImage image,
                             const VulkanAlloc& alloc,
                             Ownership ownership,
                             skgpu::Budgeted budgeted,
                             sk_sp<VulkanYcbcrConversion> ycbcrConversion)
        : Texture(sharedContext, dimensions, info, std::move(mutableState), ownership, budgeted)
        , fImage(image)
        , fMemoryAlloc(alloc)
        , fYcbcrConversion(std::move(ycbcrConversion)) {}

void VulkanTexture::freeGpuData() {
    // Need to delete any ImageViews first
    fImageViews.clear();

    // If the texture is wrapped we don't own this data
    if (this->ownership() != Ownership::kWrapped) {
        auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());
        VULKAN_CALL(sharedContext->interface(),
                    DestroyImage(sharedContext->device(), fImage, nullptr));
        skgpu::VulkanMemory::FreeImageMemory(sharedContext->memoryAllocator(), fMemoryAlloc);
    }
}

void VulkanTexture::updateImageLayout(VkImageLayout newLayout) {
    skgpu::MutableTextureStates::SetVkImageLayout(this->mutableState(), newLayout);
}

VkImageLayout VulkanTexture::currentLayout() const {
    return skgpu::MutableTextureStates::GetVkImageLayout(this->mutableState());
}

uint32_t VulkanTexture::currentQueueFamilyIndex() const {
    return skgpu::MutableTextureStates::GetVkQueueFamilyIndex(this->mutableState());
}

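// Map an image layout to the pipeline stage(s) in which the most recent writes in that layout
// could have occurred, for use as a barrier's source stage mask.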
VkPipelineStageFlags VulkanTexture::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

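// Compute the source access mask for a barrier leaving 'layout': the set of prior writes that
// must be made available before the transition.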
VkAccessFlags VulkanTexture::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    // only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}

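// Image views are created lazily and cached on the texture per usage, so repeated requests for
// the same usage return the existing view.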
const VulkanImageView* VulkanTexture::getImageView(VulkanImageView::Usage usage) const {
    for (int i = 0; i < fImageViews.size(); ++i) {
        if (fImageViews[i]->usage() == usage) {
            return fImageViews[i].get();
        }
    }

    auto sharedContext = static_cast<const VulkanSharedContext*>(this->sharedContext());
    VulkanTextureInfo vkTexInfo;
    SkAssertResult(TextureInfos::GetVulkanTextureInfo(this->textureInfo(), &vkTexInfo));
    int miplevels = this->textureInfo().mipmapped() == Mipmapped::kYes
                            ? SkMipmap::ComputeLevelCount(this->dimensions().width(),
                                                          this->dimensions().height()) + 1
                            : 1;
    auto imageView = VulkanImageView::Make(sharedContext,
                                           fImage,
                                           vkTexInfo.fFormat,
                                           usage,
                                           miplevels,
                                           fYcbcrConversion);
    return fImageViews.push_back(std::move(imageView)).get();
}

bool VulkanTexture::supportsInputAttachmentUsage() const {
    return (TextureInfos::GetVkUsageFlags(this->textureInfo()) &
            VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
}

} // namespace skgpu::graphite