/*
 * Copyright © 2022 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/set.h"
#include "anv_private.h"
#include "vk_common_entrypoints.h"

/**
 * The DOOM 64 rendering corruption happens because the game always uses
 * ```
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_UNDEFINED ->
 *                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
 * vkCmdCopyBufferToImage(...)
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ->
 *                      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
 * ```
 * when it wants to update its texture atlas image.
 *
 * According to the spec, transitioning from VK_IMAGE_LAYOUT_UNDEFINED means
 * that the current image content might be discarded, but the game relies
 * on it being fully preserved.
 *
 * This work-around layer implements super-barebone layout tracking: it
 * allows the first transition from VK_IMAGE_LAYOUT_UNDEFINED, but replaces
 * oldLayout with VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL for each
 * subsequent transition of that image.
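 *
 * A rough sketch of what the rewritten sequence effectively becomes for
 * every atlas update after the first one (illustrative only):
 * ```
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL ->
 *                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
 * vkCmdCopyBufferToImage(...)
 * vkCmdPipelineBarrier(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ->
 *                      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
 * ```
 * so the existing atlas contents are preserved instead of being discarded.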
 */

VKAPI_ATTR void VKAPI_CALL
doom64_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                          VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          VkDependencyFlags dependencyFlags,
                          uint32_t memoryBarrierCount,
                          const VkMemoryBarrier* pMemoryBarriers,
                          uint32_t bufferMemoryBarrierCount,
                          const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                          uint32_t imageMemoryBarrierCount,
                          const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, command_buffer, commandBuffer);
   assert(command_buffer && command_buffer->device);

   VkImageMemoryBarrier fixed_barrier;
   struct set *defined_images =
      command_buffer->device->workarounds.doom64_images;

   /* Only rewrite the exact pattern the game emits: a single image barrier
    * transitioning from UNDEFINED to TRANSFER_DST_OPTIMAL.
    */
   if (defined_images &&
       imageMemoryBarrierCount == 1 && pImageMemoryBarriers &&
       pImageMemoryBarriers[0].oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
       pImageMemoryBarriers[0].newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[0].image);

      if (!_mesa_set_search(defined_images, image)) {
         /* First transition of this image: let the UNDEFINED transition
          * through and remember that the image now has defined contents.
          */
         _mesa_set_add(defined_images, image);
      } else {
         /* The image was already initialized: pretend it is coming from
          * SHADER_READ_ONLY_OPTIMAL so its contents are preserved.
          */
         memcpy(&fixed_barrier, pImageMemoryBarriers, sizeof(VkImageMemoryBarrier));

         fixed_barrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

         pImageMemoryBarriers = (const VkImageMemoryBarrier*) &fixed_barrier;
      }
   }

   vk_common_CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask,
                                dependencyFlags, memoryBarrierCount,
                                pMemoryBarriers, bufferMemoryBarrierCount,
                                pBufferMemoryBarriers,
                                imageMemoryBarrierCount,
                                pImageMemoryBarriers);
}

VKAPI_ATTR VkResult VKAPI_CALL
doom64_CreateImage(VkDevice _device, const VkImageCreateInfo* pCreateInfo,
                   const VkAllocationCallbacks* pAllocator, VkImage* pImage)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   assert(device);

   /* Lazily create the per-device set used to track which images have
    * already received their first layout transition.
    */
   if (!device->workarounds.doom64_images) {
      device->workarounds.doom64_images = _mesa_pointer_set_create(NULL);

      if (!device->workarounds.doom64_images) {
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   return anv_CreateImage(_device, pCreateInfo, pAllocator, pImage);
}

VKAPI_ATTR void VKAPI_CALL
doom64_DestroyImage(VkDevice _device, VkImage _image,
                    const VkAllocationCallbacks *pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_image, image, _image);
   assert(device);

   struct set *defined_images = device->workarounds.doom64_images;

   if (image && defined_images) {
      _mesa_set_remove_key(defined_images, image);

      /* Once the last tracked image is gone, free the tracking set. */
      if (!defined_images->entries) {
         _mesa_set_destroy(defined_images, NULL);
         device->workarounds.doom64_images = NULL;
      }
   }

   anv_DestroyImage(_device, _image, pAllocator);
}