/*
 * Copyright © 2022 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/set.h"
#include "anv_private.h"
#include "vk_common_entrypoints.h"

/**
 * The DOOM 64 rendering corruption happens because the game always uses
 * ```
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_UNDEFINED ->
 *                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
 * vkCmdCopyBufferToImage(...)
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ->
 *                      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
 * ```
 * when it wants to update its texture atlas image.
 *
 * According to the spec, transitioning from VK_IMAGE_LAYOUT_UNDEFINED means
 * that the current image content may be discarded, but the game relies on
 * it being fully preserved.
 *
 * This work-around layer implements super-barebone layout tracking: it
 * allows the first transition from VK_IMAGE_LAYOUT_UNDEFINED, but replaces
 * oldLayout with VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL for every
 * subsequent transition of that image.
 *
 * Gen12+ does not ambiguate CCS data on transitions from
 * VK_IMAGE_LAYOUT_UNDEFINED, so it preserves all compressed information and
 * this work-around is not needed there.
 */

void anv_doom64_CmdPipelineBarrier(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        dstStageMask,
    VkDependencyFlags                           dependencyFlags,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, command_buffer, commandBuffer);
   assert(command_buffer && command_buffer->device);

   VkImageMemoryBarrier fixed_barrier;
   struct set *defined_images =
      command_buffer->device->workarounds.doom64_images;

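   /* Intercept only the exact pattern the game emits: a single image
    * barrier transitioning from VK_IMAGE_LAYOUT_UNDEFINED to
    * VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL.
    */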
   if (defined_images &&
       imageMemoryBarrierCount == 1 && pImageMemoryBarriers &&
       pImageMemoryBarriers[0].oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
       pImageMemoryBarriers[0].newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[0].image);

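      /* The first transition of an image may legitimately come from
       * VK_IMAGE_LAYOUT_UNDEFINED; remember the image so that every later
       * transition can be rewritten.
       */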
      if (!_mesa_set_search(defined_images, image)) {
         _mesa_set_add(defined_images, image);
      } else {
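         /* The image content is already defined: pretend it was left in
          * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so the driver does not
          * discard it.
          */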
         memcpy(&fixed_barrier, pImageMemoryBarriers,
                sizeof(VkImageMemoryBarrier));

         fixed_barrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

         pImageMemoryBarriers = (const VkImageMemoryBarrier*) &fixed_barrier;
      }
   }

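   /* Forward to the common implementation with the possibly-fixed barrier
    * array.
    */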
   vk_common_CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask,
                                dependencyFlags, memoryBarrierCount,
                                pMemoryBarriers, bufferMemoryBarrierCount,
                                pBufferMemoryBarriers,
                                imageMemoryBarrierCount,
                                pImageMemoryBarriers);
}

VkResult anv_doom64_CreateImage(
    VkDevice                                    _device,
    const VkImageCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkImage*                                    pImage)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   assert(device);

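   /* Lazily create the set that tracks which images have already been
    * transitioned out of VK_IMAGE_LAYOUT_UNDEFINED.
    */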
   if (!device->workarounds.doom64_images) {
      device->workarounds.doom64_images = _mesa_pointer_set_create(NULL);

      if (!device->workarounds.doom64_images) {
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   return anv_CreateImage(_device, pCreateInfo, pAllocator, pImage);
}

void anv_doom64_DestroyImage(
    VkDevice                                    _device,
    VkImage                                     _image,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_image, image, _image);
   assert(device);

   struct set *defined_images = device->workarounds.doom64_images;

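   /* Stop tracking the image, and tear the set down once the last tracked
    * image is gone.
    */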
   if (image && defined_images) {
      _mesa_set_remove_key(defined_images, image);

      if (!defined_images->entries) {
         _mesa_set_destroy(defined_images, NULL);
         device->workarounds.doom64_images = NULL;
      }
   }

   anv_DestroyImage(_device, _image, pAllocator);
}