/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_VulkanGraphiteUtilsPriv_DEFINED
#define skgpu_graphite_VulkanGraphiteUtilsPriv_DEFINED

#include "include/core/SkSpan.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "src/gpu/graphite/DescriptorData.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/vk/VulkanInterface.h"

#include <string>

// Helper macros to call functions on the VulkanInterface without checking for errors. Note: This
// cannot require a VulkanSharedContext because driver calls are needed before the shared context
// has been initialized.
#define VULKAN_CALL(IFACE, X) (IFACE)->fFunctions.f##X

// Must be called before checkVkResult, since this does not log if the VulkanSharedContext is
// already considering the device to be lost.
#define VULKAN_LOG_IF_NOT_SUCCESS(SHARED_CONTEXT, RESULT, X, ...)                        \
    do {                                                                                 \
        if (RESULT != VK_SUCCESS && !(SHARED_CONTEXT)->isDeviceLost()) {                 \
            SkDebugf("Failed vulkan call. Error: %d, " X "\n", RESULT, ##__VA_ARGS__);   \
        }                                                                                \
    } while (false)

#define VULKAN_CALL_RESULT(SHARED_CONTEXT, RESULT, X)                     \
    do {                                                                  \
        (RESULT) = VULKAN_CALL((SHARED_CONTEXT)->interface(), X);         \
        SkASSERT(VK_SUCCESS == RESULT || VK_ERROR_DEVICE_LOST == RESULT); \
        VULKAN_LOG_IF_NOT_SUCCESS(SHARED_CONTEXT, RESULT, #X);            \
        (SHARED_CONTEXT)->checkVkResult(RESULT);                          \
    } while (false)

// Same as VULKAN_CALL but checks for success.
#define VULKAN_CALL_ERRCHECK(SHARED_CONTEXT, X) \
    VkResult SK_MACRO_APPEND_LINE(ret);         \
    VULKAN_CALL_RESULT(SHARED_CONTEXT, SK_MACRO_APPEND_LINE(ret), X)

#define VULKAN_CALL_RESULT_NOCHECK(IFACE, RESULT, X) \
    do {                                             \
        (RESULT) = VULKAN_CALL(IFACE, X);            \
    } while (false)

namespace skgpu::graphite {

class VulkanSharedContext;

VkShaderModule createVulkanShaderModule(const VulkanSharedContext*,
                                        const std::string& spirv,
                                        VkShaderStageFlagBits);

VkDescriptorType DsTypeEnumToVkDs(DescriptorType);
void DescriptorDataToVkDescSetLayout(const VulkanSharedContext*,
                                     const SkSpan<DescriptorData>&,
                                     VkDescriptorSetLayout*);

namespace ycbcrPackaging {
// Functions to aid with packaging ycbcr information in a compact way.
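//
// For example (a sketch of the bit-packing scheme only, not the actual implementation of
// nonFormatInfoAsUInt32()), a VkSamplerYcbcrModelConversion value named ycbcrModel could be
// packed into and recovered from the uint32_t using the shift and mask constants declared below:
//
//     uint32_t packed = 0;
//     packed |= (static_cast<uint32_t>(ycbcrModel) << kYcbcrModelShift) & kYcbcrModelMask;
//
//     auto unpackedModel = static_cast<VkSamplerYcbcrModelConversion>(
//             (packed & kYcbcrModelMask) >> kYcbcrModelShift);
//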
int numInt32sNeeded(const VulkanYcbcrConversionInfo&);
uint32_t nonFormatInfoAsUInt32(const VulkanYcbcrConversionInfo&);

// Bit counts for each packed field, ordered from least to most significant bit.
static constexpr int kUsesExternalFormatBits  = 1;
static constexpr int kYcbcrModelBits          = 3;
static constexpr int kYcbcrRangeBits          = 1;
static constexpr int kXChromaOffsetBits       = 1;
static constexpr int kYChromaOffsetBits       = 1;
static constexpr int kChromaFilterBits        = 1;
static constexpr int kForceExplicitReconBits  = 1;
static constexpr int kComponentBits           = 3;

// Each field's shift is the previous field's shift plus the previous field's bit count.
static constexpr int kUsesExternalFormatShift = 0;
static constexpr int kYcbcrModelShift         = kUsesExternalFormatShift + kUsesExternalFormatBits;
static constexpr int kYcbcrRangeShift         = kYcbcrModelShift + kYcbcrModelBits;
static constexpr int kXChromaOffsetShift      = kYcbcrRangeShift + kYcbcrRangeBits;
static constexpr int kYChromaOffsetShift      = kXChromaOffsetShift + kXChromaOffsetBits;
static constexpr int kChromaFilterShift       = kYChromaOffsetShift + kYChromaOffsetBits;
static constexpr int kForceExplicitReconShift = kChromaFilterShift + kChromaFilterBits;
static constexpr int kComponentRShift         = kForceExplicitReconShift + kForceExplicitReconBits;
static constexpr int kComponentGShift         = kComponentRShift + kComponentBits;
static constexpr int kComponentBShift         = kComponentGShift + kComponentBits;
static constexpr int kComponentAShift         = kComponentBShift + kComponentBits;

static constexpr uint32_t kUseExternalFormatMask =
        ((1 << kUsesExternalFormatBits) - 1) << kUsesExternalFormatShift;
static constexpr uint32_t kYcbcrModelMask =
        ((1 << kYcbcrModelBits) - 1) << kYcbcrModelShift;
static constexpr uint32_t kYcbcrRangeMask =
        ((1 << kYcbcrRangeBits) - 1) << kYcbcrRangeShift;
static constexpr uint32_t kXChromaOffsetMask =
        ((1 << kXChromaOffsetBits) - 1) << kXChromaOffsetShift;
static constexpr uint32_t kYChromaOffsetMask =
        ((1 << kYChromaOffsetBits) - 1) << kYChromaOffsetShift;
static constexpr uint32_t kChromaFilterMask =
        ((1 << kChromaFilterBits) - 1) << kChromaFilterShift;
static constexpr uint32_t kForceExplicitReconMask =
        ((1 << kForceExplicitReconBits) - 1) << kForceExplicitReconShift;
static constexpr uint32_t kComponentRMask = ((1 << kComponentBits) - 1) << kComponentRShift;
static constexpr uint32_t kComponentGMask = ((1 << kComponentBits) - 1) << kComponentGShift;
static constexpr uint32_t kComponentBMask = ((1 << kComponentBits) - 1) << kComponentBShift;
static constexpr uint32_t kComponentAMask = ((1 << kComponentBits) - 1) << kComponentAShift;
}  // namespace ycbcrPackaging

bool vkFormatIsSupported(VkFormat);

VkShaderStageFlags PipelineStageFlagsToVkShaderStageFlags(SkEnumBitMask<PipelineStageFlags>);

} // namespace skgpu::graphite

#endif // skgpu_graphite_VulkanGraphiteUtilsPriv_DEFINED