//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

// clang-format off
//
// Source: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator
// THIS FILE HAS BEEN CHANGED FROM THE ORIGINAL VERSION
//
// Change Log:
// 3/27/19 - Make changes to suppress warnings from GCC
// 4/18/19 - Make changes to suppress warnings from clang
// 6/05/19 - Make changes to suppress warnings from clang 3.8.0
// 6/05/19 - Make changes to suppress more warnings from GCC
// 8/09/19 - Make changes to suppress dead code warnings (from upstream master branch)
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/** \mainpage Vulkan Memory Allocator

<b>Version 2.2.0</b> (2018-12-13)

Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved. \n
License: MIT

Documentation of all members: vk_mem_alloc.h

\section main_table_of_contents Table of contents

- <b>User guide</b>
  - \subpage quick_start
    - [Project setup](@ref quick_start_project_setup)
    - [Initialization](@ref quick_start_initialization)
    - [Resource allocation](@ref quick_start_resource_allocation)
  - \subpage choosing_memory_type
    - [Usage](@ref choosing_memory_type_usage)
    - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
    - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
    - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
  - \subpage memory_mapping
    - [Mapping functions](@ref memory_mapping_mapping_functions)
    - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
    - [Cache control](@ref memory_mapping_cache_control)
    - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable)
  - \subpage custom_memory_pools
    - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
    - [Linear allocation algorithm](@ref linear_algorithm)
      - [Free-at-once](@ref linear_algorithm_free_at_once)
      - [Stack](@ref linear_algorithm_stack)
      - [Double stack](@ref linear_algorithm_double_stack)
      - [Ring buffer](@ref linear_algorithm_ring_buffer)
    - [Buddy allocation algorithm](@ref buddy_algorithm)
  - \subpage defragmentation
    - [Defragmenting CPU memory](@ref defragmentation_cpu)
    - [Defragmenting GPU memory](@ref defragmentation_gpu)
    - [Additional notes](@ref defragmentation_additional_notes)
    - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
  - \subpage lost_allocations
  - \subpage statistics
    - [Numeric statistics](@ref statistics_numeric_statistics)
    - [JSON dump](@ref statistics_json_dump)
  - \subpage allocation_annotation
    - [Allocation user data](@ref allocation_user_data)
    - [Allocation names](@ref allocation_names)
  - \subpage debugging_memory_usage
    - [Memory initialization](@ref debugging_memory_usage_initialization)
    - [Margins](@ref debugging_memory_usage_margins)
    - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
  - \subpage record_and_replay
- \subpage usage_patterns
  - [Simple patterns](@ref usage_patterns_simple)
  - [Advanced patterns](@ref usage_patterns_advanced)
- \subpage configuration
  - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
  - [Custom host memory allocator](@ref custom_memory_allocator)
  - [Device memory allocation callbacks](@ref allocation_callbacks)
  - [Device heap memory limit](@ref heap_memory_limit)
  - \subpage vk_khr_dedicated_allocation
- \subpage general_considerations
  - [Thread safety](@ref general_considerations_thread_safety)
  - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
  - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
  - [Features not supported](@ref general_considerations_features_not_supported)

\section main_see_also See also

- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)



\page quick_start Quick start

\section quick_start_project_setup Project setup

Vulkan Memory Allocator comes in the form of a single header file.
You don't need to build it as a separate library project.
You can add this file directly to your project and commit it to your code repository next to your other source files.

"Single header" doesn't mean that everything is contained in C/C++ declarations,
as tends to be the case with inline functions or C++ templates.
It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
If you don't do it properly, you will get linker errors.

To do it properly:

-# Include "vk_mem_alloc.h" in each CPP file where you want to use the library.
This provides declarations of all members of the library.
-# In exactly one CPP file, define the following macro before this include.
It also enables the internal definitions.

\code
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
\endcode

It may be a good idea to create a dedicated CPP file just for this purpose.

Note on language: This library is written in C++, but has a C-compatible interface.
Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
implementation enabled by the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.

Please note that this library includes header `<vulkan/vulkan.h>`, which in turn
includes `<windows.h>` on Windows. If you need some specific macros defined
before including these headers (like `WIN32_LEAN_AND_MEAN` or
`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
them before every `#include` of this library, for example as shown below.
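
A minimal sketch of what such an include might look like on Windows. Which macros you actually need is specific to your project; the ones below are only illustrative assumptions:

\code
// Define platform/configuration macros before the headers that consume them.
#define WIN32_LEAN_AND_MEAN       // Trims <windows.h>, which is pulled in via <vulkan/vulkan.h>.
#define VK_USE_PLATFORM_WIN32_KHR // Enables Win32 WSI declarations in the Vulkan headers.

#include "vk_mem_alloc.h"         // Also includes <vulkan/vulkan.h>.
\endcode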


\section quick_start_initialization Initialization

At program startup:

-# Initialize Vulkan to have `VkPhysicalDevice` and `VkDevice` objects.
-# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
calling vmaCreateAllocator().

\code
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode

\section quick_start_resource_allocation Resource allocation

When you want to create a buffer or image:

-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
-# Fill VmaAllocationCreateInfo structure.
-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
already allocated and bound to it.

\code
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode

Don't forget to destroy your objects when no longer needed:

\code
vmaDestroyBuffer(allocator, buffer, allocation);
vmaDestroyAllocator(allocator);
\endcode


\page choosing_memory_type Choosing memory type

Physical devices in Vulkan support various combinations of memory heaps and
types. Helping you choose the correct and optimal memory type for your specific
resource is one of the key features of this library. You can use it by filling
appropriate members of the VmaAllocationCreateInfo structure, as described below.
You can also combine multiple methods.

-# If you just want to find a memory type index that meets your requirements, you
can use function vmaFindMemoryTypeIndex().
-# If you want to allocate a region of device memory without association with any
specific image or buffer, you can use function vmaAllocateMemory(). Usage of
this function is not recommended and usually not needed.
-# If you already have a buffer or an image created and want to allocate memory
for it, then bind it yourself, you can use functions
vmaAllocateMemoryForBuffer() and vmaAllocateMemoryForImage()
(a sketch follows below).
For binding, use functions vmaBindBufferMemory() and vmaBindImageMemory().
-# If you want to create a buffer or an image, allocate memory for it, and bind
them together, all in one call, you can use functions vmaCreateBuffer() and
vmaCreateImage(). This is the recommended way to use this library.

When using 3. or 4., the library internally queries Vulkan for memory types
supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
and uses only one of these types.

If no memory type can be found that meets all the requirements, these functions
return `VK_ERROR_FEATURE_NOT_PRESENT`.
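
For illustration, a minimal sketch of method 3 - allocating memory for an already created buffer and then binding it yourself. The `allocator` and `buffer` variables are assumed to exist already, and `VMA_MEMORY_USAGE_GPU_ONLY` is just an example choice:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

// Allocate memory suitable for the existing buffer...
VmaAllocation allocation;
VmaAllocationInfo allocationInfo;
vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, &allocationInfo);

// ...and bind it. The allocation knows which VkDeviceMemory block and offset to use.
vmaBindBufferMemory(allocator, allocation, buffer);
\endcode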

You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
It means no requirements are specified for the memory type.
It is valid, although not very useful.

\section choosing_memory_type_usage Usage

The easiest way to specify memory requirements is to fill the member
VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
It defines high-level, common usage types.
For more details, see the description of this enum.

For example, if you want to create a uniform buffer that will be filled using
transfer only once or infrequently and used for rendering every frame, you can
do it using the following code:

\code
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode

\section choosing_memory_type_required_preferred_flags Required and preferred flags

You can specify more detailed requirements by filling members
VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
if you want to create a buffer that will be persistently mapped on the host (so it
must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
use the following code:

\code
VmaAllocationCreateInfo allocInfo = {};
allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode

A memory type is chosen that has all the required flags and as many preferred
flags set as possible.

If you use VmaAllocationCreateInfo::usage, it is just internally converted to
a set of required and preferred flags.

\section choosing_memory_type_explicit_memory_types Explicit memory types

If you inspected the memory types available on the physical device and you have
a preference for memory types that you want to use, you can fill member
VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each set bit
means that a memory type with that index is allowed to be used for the
allocation. Special value 0, just like `UINT32_MAX`, means there are no
restrictions to memory type index.

Please note that this member is NOT just a memory type index.
Still, you can use it to choose just one specific memory type.
For example, if you already determined that your buffer should be created in
memory type 2, use the following code:

\code
uint32_t memoryTypeIndex = 2;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.memoryTypeBits = 1u << memoryTypeIndex;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode

\section choosing_memory_type_custom_memory_pools Custom memory pools

If you allocate from a custom memory pool, none of the ways of specifying memory
requirements described above are applicable, and the aforementioned members
of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
explicitly when creating the pool and is then used to make all the allocations from
that pool. For further details, see \ref custom_memory_pools.


\page memory_mapping Memory mapping

To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
to be able to read from it or write to it in CPU code.
Mapping is possible only for memory allocated from a memory type that has
the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
You can use them directly with memory allocated by this library,
but it is not recommended because of the following issue:
Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
Because of this, Vulkan Memory Allocator provides the following facilities:

\section memory_mapping_mapping_functions Mapping functions

The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
They are safer and more convenient to use than the standard Vulkan functions.
You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
The way it's implemented is that the library always maps the entire memory block, not just the region of the allocation.
For further details, see the description of the vmaMapMemory() function.
Example:

\code
// Having these objects initialized:

struct ConstantBuffer
{
    ...
};
ConstantBuffer constantBufferData;

VmaAllocator allocator;
VkBuffer constantBuffer;
VmaAllocation constantBufferAllocation;

// You can map and fill your buffer using the following code:

void* mappedData;
vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
vmaUnmapMemory(allocator, constantBufferAllocation);
\endcode


When mapping, you may see a warning from the Vulkan validation layer similar to this one:

<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>

It happens because the library maps the entire `VkDeviceMemory` block, where different
types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
You can safely ignore it if you are sure you access only the memory of the
object you intended to map.


\section memory_mapping_persistently_mapped_memory Persistently mapped memory

Keeping your memory persistently mapped is generally OK in Vulkan.
You don't need to unmap it before using its data on the GPU.
The library provides a special feature designed for that:
Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
VmaAllocationCreateInfo::flags stay mapped all the time,
so you can just access the CPU pointer to the memory at any time
without needing to call any "map" or "unmap" function.
Example:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = sizeof(ConstantBuffer);
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

// Buffer is already mapped. You can access its memory.
memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
\endcode

There are some exceptions though, when you should consider mapping memory only for a short period of time:

- When the operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
  the device is a discrete AMD GPU,
  and the memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
  (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
  then whenever a memory block allocated from this memory type stays mapped
  for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this
  block is migrated by WDDM to system RAM, which degrades performance. It doesn't
  matter whether that particular memory block is actually used by the command buffer
  being submitted.
- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175) -
  which requires unmapping before the GPU can see an updated texture.
- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.

\section memory_mapping_cache_control Cache control

Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
you need to manually invalidate the cache before reading from a mapped pointer
and flush the cache after writing to a mapped pointer.
Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
functions that refer to a given allocation object: vmaFlushAllocation(),
vmaInvalidateAllocation().
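
A minimal sketch of the flush case, assuming `alloc` was created in a memory type that is `HOST_VISIBLE` but possibly not `HOST_COHERENT`, and reusing `constantBufferData` from the earlier example:

\code
// Write through a mapped pointer, then flush so the device sees the new data.
void* mappedData;
vmaMapMemory(allocator, alloc, &mappedData);
memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
// Flush the whole allocation: offset 0, VK_WHOLE_SIZE covers its entire size.
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
vmaUnmapMemory(allocator, alloc);
\endcode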

Regions of memory specified for flush/invalidate must be aligned to
`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
within blocks are aligned to this value, so their offsets are always multiples of
`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.

Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.

Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
currently provide the `HOST_COHERENT` flag on all memory types that are
`HOST_VISIBLE`, so on this platform you may not need to bother.

\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable

It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
even though you didn't explicitly request it.
For example, the application may run on integrated graphics with unified memory (like Intel), or
allocation from video memory might have failed, so the library chose system memory as a fallback.

You can detect this case and map such an allocation to access its memory on the CPU directly,
instead of launching a transfer operation.
In order to do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
and look for the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in the properties of that memory type.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = sizeof(ConstantBuffer);
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

VkMemoryPropertyFlags memFlags;
vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
    // Allocation ended up in mappable memory. You can map it and access it directly.
    void* mappedData;
    vmaMapMemory(allocator, alloc, &mappedData);
    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
    vmaUnmapMemory(allocator, alloc);
}
else
{
    // Allocation ended up in non-mappable memory.
    // You need to create a CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
}
\endcode

You can even use the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
If the allocation ends up in a memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
If not, the flag is just ignored.
Example:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = sizeof(ConstantBuffer);
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

if(allocInfo.pMappedData != nullptr)
{
    // Allocation ended up in mappable memory.
    // It's persistently mapped. You can access it directly.
    memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
}
else
{
    // Allocation ended up in non-mappable memory.
    // You need to create a CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
}
\endcode



\page custom_memory_pools Custom memory pools

A memory pool contains a number of `VkDeviceMemory` blocks.
The library automatically creates and manages a default pool for each memory type available on the device.
The default memory pool automatically grows in size.
The size of allocated blocks is also variable and managed automatically.

You can create a custom pool and allocate memory out of it.
It can be useful if you want to:

- Keep certain kinds of allocations separate from others.
- Enforce a particular, fixed size of Vulkan memory blocks.
- Limit the maximum amount of Vulkan memory allocated for that pool.
- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.

To use custom memory pools:

-# Fill VmaPoolCreateInfo structure.
-# Call vmaCreatePool() to obtain #VmaPool handle.
-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
You don't need to specify any other parameters of this structure, like `usage`.

Example:

\code
// Create a pool that can have at most 2 blocks, 128 MiB each.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = ...
poolCreateInfo.blockSize = 128ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 2;

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

// Allocate a buffer out of it.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
\endcode

You have to free all allocations made from this pool before destroying it.

\code
vmaDestroyBuffer(allocator, buf, alloc);
vmaDestroyPool(allocator, pool);
\endcode

\section custom_memory_pools_MemTypeIndex Choosing memory type index

When creating a pool, you must explicitly specify the memory type index.
To find the one suitable for your buffers or images, you can use the helper functions
vmaFindMemoryTypeIndexForBufferInfo() and vmaFindMemoryTypeIndexForImageInfo().
You need to provide structures with example parameters of the buffers or images
that you are going to create in that pool.

\code
VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
exampleBufCreateInfo.size = 1024; // Whatever.
exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.

uint32_t memTypeIndex;
vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
// ...
\endcode

When creating buffers/images allocated in that pool, provide the following parameters:

- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
  or the other way around.
- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
  Other members are ignored anyway.

\section linear_algorithm Linear allocation algorithm

Each Vulkan memory block managed by this library has accompanying metadata that
keeps track of used and unused regions. By default, the metadata structure and
algorithm try to find the best place for new allocations among free regions to
optimize memory usage. This way you can allocate and free objects in any order.

Sometimes there is a need to use a simpler, linear allocation algorithm. You can
create a custom pool that uses this algorithm by adding flag
#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
the #VmaPool object. An alternative metadata management is then used. It always
creates new allocations after the last one and doesn't reuse free regions left after
allocations freed in the middle. This results in better allocation performance and
less memory consumed by metadata.

With this one flag, you can create a custom pool that can be used in many ways:
free-at-once, stack, double stack, and ring buffer. See below for details.
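
As a minimal sketch, such a pool could be created like this. Here `memTypeIndex` is assumed to have been found as described in the previous section, and the block size is an arbitrary example value:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // Use the linear algorithm.
poolCreateInfo.blockSize = 64ull * 1024 * 1024;              // One 64 MiB block - example size only.
poolCreateInfo.maxBlockCount = 1;                            // Required for double stack / ring buffer usage.

VmaPool linearPool;
vmaCreatePool(allocator, &poolCreateInfo, &linearPool);
\endcode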

\subsection linear_algorithm_free_at_once Free-at-once

In a pool that uses the linear algorithm, you still need to free all the allocations
individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
them in any order. New allocations are always made after the last one - free space
in the middle is not reused. However, when you release all the allocations and
the pool becomes empty, allocation starts from the beginning again. This way you
can use the linear algorithm to speed up creation of allocations that you are going
to release all at once.

This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
value that allows multiple memory blocks.

\subsection linear_algorithm_stack Stack

When you free an allocation that was created last, its space can be reused.
Thanks to this, if you always release allocations in the order opposite to their
creation (LIFO - Last In First Out), you can achieve the behavior of a stack.

This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
value that allows multiple memory blocks.

\subsection linear_algorithm_double_stack Double stack

The space reserved by a custom pool with the linear algorithm may be used by two
stacks:

- First, the default one, growing up from offset 0.
- Second, the "upper" one, growing down from the end towards lower offsets.

To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
to VmaAllocationCreateInfo::flags.

Double stack is available only in pools with one memory block -
VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.

When the two stacks' ends meet, so there is not enough space between them for a
new allocation, such an allocation fails with the usual
`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
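
A minimal sketch of an upper-stack allocation, assuming `linearPool` is a single-block linear pool like the one above and `bufCreateInfo` is filled as in the earlier examples:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = linearPool;                               // Allocate from the linear pool.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Place it on the "upper" stack.

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode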

\subsection linear_algorithm_ring_buffer Ring buffer

When you free some allocations from the beginning and there is not enough free space
for a new one at the end of the pool, the allocator's "cursor" wraps around to the
beginning and starts allocating there. Thanks to this, if you always release
allocations in the same order as you created them (FIFO - First In First Out),
you can achieve the behavior of a ring buffer / queue.

Pools with the linear algorithm support [lost allocations](@ref lost_allocations) when used as a ring buffer.
If there is not enough free space for a new allocation, but existing allocations
from the front of the queue can become lost, they become lost and the allocation
succeeds.
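
A minimal sketch of requesting that behavior per allocation. The full setup of lost allocations (including frame indices) is described on the \ref lost_allocations page; here `linearPool` and `bufCreateInfo` are assumed from the earlier examples:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = linearPool;
// This allocation may become lost, and it may make older allocations lost in order to succeed.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
\endcode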

Ring buffer is available only in pools with one memory block -
VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.

\section buddy_algorithm Buddy allocation algorithm

There is another allocation algorithm that can be used with custom pools, called
"buddy". Its internal data structure is based on a tree of blocks, each having
a size that is a power of two and half of its parent's size. When you want to
allocate memory of a certain size, a free node in the tree is located. If it's too
large, it is recursively split into two halves (called "buddies"). However, if the
requested allocation size is not a power of two, the size of the tree node is
aligned up to the nearest power of two and the remaining space is wasted. When
two buddy nodes become free, they are merged back into one larger node.

The advantage of the buddy allocation algorithm over the default algorithm is faster
allocation and deallocation, as well as smaller external fragmentation. The
disadvantage is more wasted space (internal fragmentation).

For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation)
or other sources that describe this concept in general.

To use the buddy allocation algorithm with a custom pool, add flag
#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
the #VmaPool object.
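
For example, a minimal sketch of such a pool, again assuming `memTypeIndex` was found earlier; the block size is an arbitrary power of two chosen for illustration:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT; // Use the buddy algorithm.
poolCreateInfo.blockSize = 256ull * 1024 * 1024;            // Power of two recommended (see the limitations below).

VmaPool buddyPool;
vmaCreatePool(allocator, &poolCreateInfo, &buddyPool);
\endcode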

Several limitations apply to pools that use the buddy algorithm:

- It is recommended to use a VmaPoolCreateInfo::blockSize that is a power of two.
  Otherwise, only the largest power of two smaller than that size is used for
  allocations. The remaining space always stays unused.
- [Margins](@ref debugging_memory_usage_margins) and
  [corruption detection](@ref debugging_memory_usage_corruption_detection)
  don't work in such pools.
- [Lost allocations](@ref lost_allocations) don't work in such pools. You can
  use them, but they never become lost. Support may be added in the future.
- [Defragmentation](@ref defragmentation) doesn't work with allocations made from
  such a pool.

\page defragmentation Defragmentation

Interleaved allocations and deallocations of many objects of varying size can
cause fragmentation over time, which can lead to a situation where the library is unable
to find a continuous range of free memory for a new allocation, even though there is
enough free space, just scattered across many small free ranges between existing
allocations.

To mitigate this problem, you can use the defragmentation feature:
structure #VmaDefragmentationInfo2 and functions vmaDefragmentationBegin(), vmaDefragmentationEnd().
Given a set of allocations,
these functions can move them to compact used memory, ensure more continuous free
space, and possibly also free some `VkDeviceMemory` blocks.

What the defragmentation does is:

- Updates #VmaAllocation objects to point to a new `VkDeviceMemory` and offset.
  After an allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
  VmaAllocationInfo::offset changes. You must query them again using
  vmaGetAllocationInfo() if you need them.
- Moves actual data in memory.

What it doesn't do, so you need to do it yourself:

- Recreate buffers and images that were bound to allocations that were defragmented and
  bind them to their new places in memory.
  You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
  `vkCreateBuffer()`, `vkCreateImage()` for that purpose and NOT vmaDestroyBuffer(),
  vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to
  destroy or create allocation objects!
- Recreate views and update descriptors that point to these buffers and images.

\section defragmentation_cpu Defragmenting CPU memory

The following example demonstrates how you can run defragmentation on CPU.
Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented.
Others are ignored.

The way it works is:

- It temporarily maps entire memory blocks when necessary.
- It moves data using the `memmove()` function.

\code
// Given the following variables already initialized:
VkDevice device;
VmaAllocator allocator;
std::vector<VkBuffer> buffers;
std::vector<VmaAllocation> allocations;


const uint32_t allocCount = (uint32_t)allocations.size();
std::vector<VkBool32> allocationsChanged(allocCount);

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations.data();
defragInfo.pAllocationsChanged = allocationsChanged.data();
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);

for(uint32_t i = 0; i < allocCount; ++i)
{
    if(allocationsChanged[i])
    {
        // Destroy buffer that is immutably bound to memory region which is no longer valid.
        vkDestroyBuffer(device, buffers[i], nullptr);

        // Create new buffer with same parameters.
        VkBufferCreateInfo bufferInfo = ...;
        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);

        // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.

        // Bind new buffer to new memory region. Data contained in it is already moved.
        VmaAllocationInfo allocInfo;
        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
        vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
    }
}
\endcode

815*b7893ccfSSadaf Ebrahimi Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
816*b7893ccfSSadaf Ebrahimi This output array tells whether particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
817*b7893ccfSSadaf Ebrahimi has been modified during defragmentation.
818*b7893ccfSSadaf Ebrahimi You can pass null, but you then need to query every allocation passed to defragmentation
819*b7893ccfSSadaf Ebrahimi for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
820*b7893ccfSSadaf Ebrahimi
821*b7893ccfSSadaf Ebrahimi If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
822*b7893ccfSSadaf Ebrahimi you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
823*b7893ccfSSadaf Ebrahimi instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
824*b7893ccfSSadaf Ebrahimi to defragment all allocations in given pools.
825*b7893ccfSSadaf Ebrahimi You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
826*b7893ccfSSadaf Ebrahimi You can also combine both methods.
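
For example, to defragment all allocations in a single custom pool, a minimal sketch could look like this (`pool` is assumed to be an already created #VmaPool):

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.poolCount = 1;
defragInfo.pPools = &pool;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);

// pAllocationsChanged cannot be used here, so query every allocation from this pool
// using vmaGetAllocationInfo() and recreate/rebind its buffer or image as needed.
\endcode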
827*b7893ccfSSadaf Ebrahimi
828*b7893ccfSSadaf Ebrahimi \section defragmentation_gpu Defragmenting GPU memory
829*b7893ccfSSadaf Ebrahimi
830*b7893ccfSSadaf Ebrahimi It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
To do that, you need to pass a command buffer that meets the requirements described in
VmaDefragmentationInfo2::commandBuffer. The way it works is:
833*b7893ccfSSadaf Ebrahimi
834*b7893ccfSSadaf Ebrahimi - It creates temporary buffers and binds them to entire memory blocks when necessary.
835*b7893ccfSSadaf Ebrahimi - It issues `vkCmdCopyBuffer()` to passed command buffer.
836*b7893ccfSSadaf Ebrahimi
837*b7893ccfSSadaf Ebrahimi Example:
838*b7893ccfSSadaf Ebrahimi
839*b7893ccfSSadaf Ebrahimi \code
840*b7893ccfSSadaf Ebrahimi // Given following variables already initialized:
841*b7893ccfSSadaf Ebrahimi VkDevice device;
842*b7893ccfSSadaf Ebrahimi VmaAllocator allocator;
843*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer;
844*b7893ccfSSadaf Ebrahimi std::vector<VkBuffer> buffers;
845*b7893ccfSSadaf Ebrahimi std::vector<VmaAllocation> allocations;
846*b7893ccfSSadaf Ebrahimi
847*b7893ccfSSadaf Ebrahimi
848*b7893ccfSSadaf Ebrahimi const uint32_t allocCount = (uint32_t)allocations.size();
849*b7893ccfSSadaf Ebrahimi std::vector<VkBool32> allocationsChanged(allocCount);
850*b7893ccfSSadaf Ebrahimi
851*b7893ccfSSadaf Ebrahimi VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
852*b7893ccfSSadaf Ebrahimi vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
853*b7893ccfSSadaf Ebrahimi
854*b7893ccfSSadaf Ebrahimi VmaDefragmentationInfo2 defragInfo = {};
855*b7893ccfSSadaf Ebrahimi defragInfo.allocationCount = allocCount;
856*b7893ccfSSadaf Ebrahimi defragInfo.pAllocations = allocations.data();
857*b7893ccfSSadaf Ebrahimi defragInfo.pAllocationsChanged = allocationsChanged.data();
858*b7893ccfSSadaf Ebrahimi defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
859*b7893ccfSSadaf Ebrahimi defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
860*b7893ccfSSadaf Ebrahimi defragInfo.commandBuffer = commandBuffer;
861*b7893ccfSSadaf Ebrahimi
862*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext defragCtx;
863*b7893ccfSSadaf Ebrahimi vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
864*b7893ccfSSadaf Ebrahimi
865*b7893ccfSSadaf Ebrahimi vkEndCommandBuffer(commandBuffer);
866*b7893ccfSSadaf Ebrahimi
867*b7893ccfSSadaf Ebrahimi // Submit commandBuffer.
868*b7893ccfSSadaf Ebrahimi // Wait for a fence that ensures commandBuffer execution finished.
869*b7893ccfSSadaf Ebrahimi
870*b7893ccfSSadaf Ebrahimi vmaDefragmentationEnd(allocator, defragCtx);
871*b7893ccfSSadaf Ebrahimi
872*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < allocCount; ++i)
873*b7893ccfSSadaf Ebrahimi {
874*b7893ccfSSadaf Ebrahimi if(allocationsChanged[i])
875*b7893ccfSSadaf Ebrahimi {
876*b7893ccfSSadaf Ebrahimi // Destroy buffer that is immutably bound to memory region which is no longer valid.
877*b7893ccfSSadaf Ebrahimi vkDestroyBuffer(device, buffers[i], nullptr);
878*b7893ccfSSadaf Ebrahimi
879*b7893ccfSSadaf Ebrahimi // Create new buffer with same parameters.
880*b7893ccfSSadaf Ebrahimi VkBufferCreateInfo bufferInfo = ...;
881*b7893ccfSSadaf Ebrahimi vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
882*b7893ccfSSadaf Ebrahimi
        // You can make a dummy call to vkGetBufferMemoryRequirements() here to silence the validation layer warning.
884*b7893ccfSSadaf Ebrahimi
885*b7893ccfSSadaf Ebrahimi // Bind new buffer to new memory region. Data contained in it is already moved.
886*b7893ccfSSadaf Ebrahimi VmaAllocationInfo allocInfo;
887*b7893ccfSSadaf Ebrahimi vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
888*b7893ccfSSadaf Ebrahimi vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
889*b7893ccfSSadaf Ebrahimi }
890*b7893ccfSSadaf Ebrahimi }
891*b7893ccfSSadaf Ebrahimi \endcode
892*b7893ccfSSadaf Ebrahimi
You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
The library then automatically chooses the best method to defragment each memory pool.
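
For example, a minimal sketch of a VmaDefragmentationInfo2 configured for both methods in one call (assuming `commandBuffer` is recorded and submitted as in the example above):

\code
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
defragInfo.commandBuffer = commandBuffer; // Needed for the GPU part.
\endcode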
895*b7893ccfSSadaf Ebrahimi
You don't have to block your entire program waiting until defragmentation finishes.
You can run it in the background, as long as you carefully fulfill the requirements described
in the documentation of function vmaDefragmentationBegin().
899*b7893ccfSSadaf Ebrahimi
900*b7893ccfSSadaf Ebrahimi \section defragmentation_additional_notes Additional notes
901*b7893ccfSSadaf Ebrahimi
902*b7893ccfSSadaf Ebrahimi While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
903*b7893ccfSSadaf Ebrahimi See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
904*b7893ccfSSadaf Ebrahimi
905*b7893ccfSSadaf Ebrahimi If you defragment allocations bound to images, these images should be created with
906*b7893ccfSSadaf Ebrahimi `VK_IMAGE_CREATE_ALIAS_BIT` flag, to make sure that new image created with same
907*b7893ccfSSadaf Ebrahimi parameters and pointing to data copied to another memory region will interpret
908*b7893ccfSSadaf Ebrahimi its contents consistently. Otherwise you may experience corrupted data on some
909*b7893ccfSSadaf Ebrahimi implementations, e.g. due to different pixel swizzling used internally by the graphics driver.
910*b7893ccfSSadaf Ebrahimi
911*b7893ccfSSadaf Ebrahimi If you defragment allocations bound to images, new images to be bound to new
912*b7893ccfSSadaf Ebrahimi memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
913*b7893ccfSSadaf Ebrahimi and then transitioned to their original layout from before defragmentation using
914*b7893ccfSSadaf Ebrahimi an image memory barrier.
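
Below is a minimal, illustrative sketch of recreating such an image after defragmentation. `imageInfo`, `originalLayout`, `images`, `allocations` and `commandBuffer` are assumed to be provided by your code, and the access masks and pipeline stages in the barrier depend on how you actually use the image:

\code
// Recreate an image whose allocation was moved.
vkDestroyImage(device, images[i], nullptr);

imageInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
vkCreateImage(device, &imageInfo, nullptr, &images[i]);

VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
vkBindImageMemory(device, images[i], allocInfo.deviceMemory, allocInfo.offset);

// Transition from PREINITIALIZED back to the layout used before defragmentation.
VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
barrier.oldLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
barrier.newLayout = originalLayout;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = images[i];
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer,
    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    0, 0, nullptr, 0, nullptr, 1, &barrier);
\endcode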
915*b7893ccfSSadaf Ebrahimi
Please don't expect memory to be fully compacted after defragmentation.
The algorithms inside are based on heuristics that try to maximize the number of Vulkan
memory blocks that become completely empty and can be released, as well as to maximize the continuous
empty space inside the remaining blocks, while minimizing the number and size of allocations that
need to be moved. Some fragmentation may still remain afterwards - this is normal.
921*b7893ccfSSadaf Ebrahimi
922*b7893ccfSSadaf Ebrahimi \section defragmentation_custom_algorithm Writing custom defragmentation algorithm
923*b7893ccfSSadaf Ebrahimi
924*b7893ccfSSadaf Ebrahimi If you want to implement your own, custom defragmentation algorithm,
925*b7893ccfSSadaf Ebrahimi there is infrastructure prepared for that,
926*b7893ccfSSadaf Ebrahimi but it is not exposed through the library API - you need to hack its source code.
927*b7893ccfSSadaf Ebrahimi Here are steps needed to do this:
928*b7893ccfSSadaf Ebrahimi
-# The main thing you need to do is to define your own class derived from the base abstract
class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
See the definition and comments of this class for details.
-# Your code needs to interact with device memory block metadata.
If you need more access to its data than is provided by its public interface,
declare your new class as a friend class, e.g. in class `VmaBlockMetadata_Generic`.
935*b7893ccfSSadaf Ebrahimi -# If you want to create a flag that would enable your algorithm or pass some additional
936*b7893ccfSSadaf Ebrahimi flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
937*b7893ccfSSadaf Ebrahimi VmaDefragmentationInfo2::flags.
938*b7893ccfSSadaf Ebrahimi -# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object
939*b7893ccfSSadaf Ebrahimi of your new class whenever needed.
940*b7893ccfSSadaf Ebrahimi
941*b7893ccfSSadaf Ebrahimi
942*b7893ccfSSadaf Ebrahimi \page lost_allocations Lost allocations
943*b7893ccfSSadaf Ebrahimi
If your game oversubscribes video memory, it may still work OK in previous-generation
graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
paged to system RAM. In Vulkan you can't rely on that, because when you run out of
memory, an allocation just fails. If you have more data (e.g. textures) than can
fit into VRAM and you don't need it all at once, you may want to upload it to the
GPU on demand and "push out" resources that have not been used for a long time to make room
for new ones, effectively using VRAM (or a certain memory pool) as a form of
cache. Vulkan Memory Allocator can help you with that by supporting a concept of
"lost allocations".
953*b7893ccfSSadaf Ebrahimi
To create an allocation that can become lost, include the #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
such an allocation in every new frame, you need to check whether it has become lost.
To check it, call vmaTouchAllocation().
If the allocation is lost, you should not use it or the buffer/image bound to it,
but you must not forget to eventually destroy the allocation and the buffer/image.
vmaGetAllocationInfo() can also be used for checking the status of the allocation.
The allocation is lost when the returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`.
962*b7893ccfSSadaf Ebrahimi
963*b7893ccfSSadaf Ebrahimi To create an allocation that can make some other allocations lost to make room
964*b7893ccfSSadaf Ebrahimi for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will
965*b7893ccfSSadaf Ebrahimi usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and
966*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time.
967*b7893ccfSSadaf Ebrahimi
Warning! The current implementation uses a quite naive, brute-force algorithm,
which can make allocation calls that use the #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
flag quite slow. A new, more optimal algorithm and data structure to speed this
up is planned for the future.
972*b7893ccfSSadaf Ebrahimi
973*b7893ccfSSadaf Ebrahimi <b>Q: When interleaving creation of new allocations with usage of existing ones,
974*b7893ccfSSadaf Ebrahimi how do you make sure that an allocation won't become lost while it's used in the
975*b7893ccfSSadaf Ebrahimi current frame?</b>
976*b7893ccfSSadaf Ebrahimi
It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only returns the allocation
status/parameters and checks whether it is lost, but when it is not, it also
atomically marks it as used in the current frame, which makes it impossible for the allocation to
become lost in that frame. It uses a lockless algorithm, so it works fast and
doesn't involve locking any internal mutex.
982*b7893ccfSSadaf Ebrahimi
983*b7893ccfSSadaf Ebrahimi <b>Q: What if my allocation may still be in use by the GPU when it's rendering a
984*b7893ccfSSadaf Ebrahimi previous frame while I already submit new frame on the CPU?</b>
985*b7893ccfSSadaf Ebrahimi
986*b7893ccfSSadaf Ebrahimi You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not
987*b7893ccfSSadaf Ebrahimi become lost for a number of additional frames back from the current one by
specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for the default
memory pools) and VmaPoolCreateInfo::frameInUseCount (for custom pools).
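
For example, if the GPU may still be reading resources from up to 2 frames back (an illustrative value - pick one matching your frame queue depth):

\code
VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill other members ...
allocatorInfo.frameInUseCount = 2;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode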
990*b7893ccfSSadaf Ebrahimi
991*b7893ccfSSadaf Ebrahimi <b>Q: How do you inform the library when new frame starts?</b>
992*b7893ccfSSadaf Ebrahimi
993*b7893ccfSSadaf Ebrahimi You need to call function vmaSetCurrentFrameIndex().
994*b7893ccfSSadaf Ebrahimi
995*b7893ccfSSadaf Ebrahimi Example code:
996*b7893ccfSSadaf Ebrahimi
997*b7893ccfSSadaf Ebrahimi \code
998*b7893ccfSSadaf Ebrahimi struct MyBuffer
999*b7893ccfSSadaf Ebrahimi {
    VkBuffer m_Buf = VK_NULL_HANDLE;
    VmaAllocation m_Alloc = VK_NULL_HANDLE;
1002*b7893ccfSSadaf Ebrahimi
1003*b7893ccfSSadaf Ebrahimi // Called when the buffer is really needed in the current frame.
1004*b7893ccfSSadaf Ebrahimi void EnsureBuffer();
1005*b7893ccfSSadaf Ebrahimi };
1006*b7893ccfSSadaf Ebrahimi
1007*b7893ccfSSadaf Ebrahimi void MyBuffer::EnsureBuffer()
1008*b7893ccfSSadaf Ebrahimi {
1009*b7893ccfSSadaf Ebrahimi // Buffer has been created.
1010*b7893ccfSSadaf Ebrahimi if(m_Buf != VK_NULL_HANDLE)
1011*b7893ccfSSadaf Ebrahimi {
1012*b7893ccfSSadaf Ebrahimi // Check if its allocation is not lost + mark it as used in current frame.
1013*b7893ccfSSadaf Ebrahimi if(vmaTouchAllocation(allocator, m_Alloc))
1014*b7893ccfSSadaf Ebrahimi {
1015*b7893ccfSSadaf Ebrahimi // It's all OK - safe to use m_Buf.
1016*b7893ccfSSadaf Ebrahimi return;
1017*b7893ccfSSadaf Ebrahimi }
1018*b7893ccfSSadaf Ebrahimi }
1019*b7893ccfSSadaf Ebrahimi
    // Buffer doesn't exist yet or is lost - destroy and recreate it.
1021*b7893ccfSSadaf Ebrahimi
1022*b7893ccfSSadaf Ebrahimi vmaDestroyBuffer(allocator, m_Buf, m_Alloc);
1023*b7893ccfSSadaf Ebrahimi
1024*b7893ccfSSadaf Ebrahimi VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1025*b7893ccfSSadaf Ebrahimi bufCreateInfo.size = 1024;
1026*b7893ccfSSadaf Ebrahimi bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
1027*b7893ccfSSadaf Ebrahimi
1028*b7893ccfSSadaf Ebrahimi VmaAllocationCreateInfo allocCreateInfo = {};
1029*b7893ccfSSadaf Ebrahimi allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1030*b7893ccfSSadaf Ebrahimi allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
1031*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
1032*b7893ccfSSadaf Ebrahimi
1033*b7893ccfSSadaf Ebrahimi vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr);
1034*b7893ccfSSadaf Ebrahimi }
1035*b7893ccfSSadaf Ebrahimi \endcode
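
For completeness, a minimal sketch of informing the library about a new frame - call this once at the beginning of every frame, before touching any allocations:

\code
static uint32_t frameIndex = 0;
++frameIndex;
vmaSetCurrentFrameIndex(allocator, frameIndex);
\endcode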
1036*b7893ccfSSadaf Ebrahimi
1037*b7893ccfSSadaf Ebrahimi When using lost allocations, you may see some Vulkan validation layer warnings
1038*b7893ccfSSadaf Ebrahimi about overlapping regions of memory bound to different kinds of buffers and
1039*b7893ccfSSadaf Ebrahimi images. This is still valid as long as you implement proper handling of lost
1040*b7893ccfSSadaf Ebrahimi allocations (like in the example above) and don't use them.
1041*b7893ccfSSadaf Ebrahimi
1042*b7893ccfSSadaf Ebrahimi You can create an allocation that is already in lost state from the beginning using function
1043*b7893ccfSSadaf Ebrahimi vmaCreateLostAllocation(). It may be useful if you need a "dummy" allocation that is not null.
1044*b7893ccfSSadaf Ebrahimi
1045*b7893ccfSSadaf Ebrahimi You can call function vmaMakePoolAllocationsLost() to set all eligible allocations
in a specified custom pool to the lost state.
Allocations that have been "touched" in the current frame or within VmaPoolCreateInfo::frameInUseCount frames back
cannot become lost.
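
A minimal sketch (assuming `pool` is an already created #VmaPool):

\code
size_t lostAllocationCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostAllocationCount);
// lostAllocationCount now holds the number of allocations that were marked as lost.
\endcode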
1049*b7893ccfSSadaf Ebrahimi
<b>Q: Can I touch an allocation that cannot become lost?</b>
1051*b7893ccfSSadaf Ebrahimi
1052*b7893ccfSSadaf Ebrahimi Yes, although it has no visible effect.
Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update the last use frame index
also for allocations that cannot become lost, but the only way to observe it is to dump
the internal allocator state using vmaBuildStatsString().
You can use this feature for debugging purposes to explicitly mark allocations that you use
in the current frame and then analyze the JSON dump to see for how long each allocation stays unused.
1058*b7893ccfSSadaf Ebrahimi
1059*b7893ccfSSadaf Ebrahimi
1060*b7893ccfSSadaf Ebrahimi \page statistics Statistics
1061*b7893ccfSSadaf Ebrahimi
1062*b7893ccfSSadaf Ebrahimi This library contains functions that return information about its internal state,
1063*b7893ccfSSadaf Ebrahimi especially the amount of memory allocated from Vulkan.
1064*b7893ccfSSadaf Ebrahimi Please keep in mind that these functions need to traverse all internal data structures
to gather this information, so they may be quite time-consuming.
1066*b7893ccfSSadaf Ebrahimi Don't call them too often.
1067*b7893ccfSSadaf Ebrahimi
1068*b7893ccfSSadaf Ebrahimi \section statistics_numeric_statistics Numeric statistics
1069*b7893ccfSSadaf Ebrahimi
You can query for overall statistics of the allocator using function vmaCalculateStats().
Information is returned in structure #VmaStats.
It contains #VmaStatInfo - the number of allocated blocks, number of allocations
(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
number of bytes used and unused (but still allocated from Vulkan), and other information.
These statistics are available per memory heap, per memory type, and as a total for the whole allocator.
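
A minimal usage sketch:

\code
VmaStats stats;
vmaCalculateStats(allocator, &stats);

// Totals summed over all memory heaps and types.
printf("Device memory blocks: %u, allocations: %u\n",
    stats.total.blockCount, stats.total.allocationCount);
printf("Bytes used: %llu, bytes unused: %llu\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes);
\endcode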
1076*b7893ccfSSadaf Ebrahimi
1077*b7893ccfSSadaf Ebrahimi You can query for statistics of a custom pool using function vmaGetPoolStats().
Information is returned in structure #VmaPoolStats.
1079*b7893ccfSSadaf Ebrahimi
You can query for information about a specific allocation using function vmaGetAllocationInfo().
It fills structure #VmaAllocationInfo.
1082*b7893ccfSSadaf Ebrahimi
1083*b7893ccfSSadaf Ebrahimi \section statistics_json_dump JSON dump
1084*b7893ccfSSadaf Ebrahimi
1085*b7893ccfSSadaf Ebrahimi You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
1086*b7893ccfSSadaf Ebrahimi The result is guaranteed to be correct JSON.
1087*b7893ccfSSadaf Ebrahimi It uses ANSI encoding.
Any strings provided by the user (see [Allocation names](@ref allocation_names))
are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
this JSON string can be treated as using that encoding.
1091*b7893ccfSSadaf Ebrahimi It must be freed using function vmaFreeStatsString().
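
A minimal usage sketch:

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map of blocks.

// Write statsString to a file or log it...

vmaFreeStatsString(allocator, statsString);
\endcode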
1092*b7893ccfSSadaf Ebrahimi
1093*b7893ccfSSadaf Ebrahimi The format of this JSON string is not part of official documentation of the library,
1094*b7893ccfSSadaf Ebrahimi but it will not change in backward-incompatible way without increasing library major version number
1095*b7893ccfSSadaf Ebrahimi and appropriate mention in changelog.
1096*b7893ccfSSadaf Ebrahimi
1097*b7893ccfSSadaf Ebrahimi The JSON string contains all the data that can be obtained using vmaCalculateStats().
1098*b7893ccfSSadaf Ebrahimi It can also contain detailed map of allocated memory blocks and their regions -
1099*b7893ccfSSadaf Ebrahimi free and occupied by allocations.
1100*b7893ccfSSadaf Ebrahimi This allows e.g. to visualize the memory or assess fragmentation.
1101*b7893ccfSSadaf Ebrahimi
1102*b7893ccfSSadaf Ebrahimi
1103*b7893ccfSSadaf Ebrahimi \page allocation_annotation Allocation names and user data
1104*b7893ccfSSadaf Ebrahimi
1105*b7893ccfSSadaf Ebrahimi \section allocation_user_data Allocation user data
1106*b7893ccfSSadaf Ebrahimi
1107*b7893ccfSSadaf Ebrahimi You can annotate allocations with your own information, e.g. for debugging purposes.
1108*b7893ccfSSadaf Ebrahimi To do that, fill VmaAllocationCreateInfo::pUserData field when creating
1109*b7893ccfSSadaf Ebrahimi an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
1110*b7893ccfSSadaf Ebrahimi some handle, index, key, ordinal number or any other value that would associate
1111*b7893ccfSSadaf Ebrahimi the allocation with your custom metadata.
1112*b7893ccfSSadaf Ebrahimi
1113*b7893ccfSSadaf Ebrahimi \code
1114*b7893ccfSSadaf Ebrahimi VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1115*b7893ccfSSadaf Ebrahimi // Fill bufferInfo...
1116*b7893ccfSSadaf Ebrahimi
1117*b7893ccfSSadaf Ebrahimi MyBufferMetadata* pMetadata = CreateBufferMetadata();
1118*b7893ccfSSadaf Ebrahimi
1119*b7893ccfSSadaf Ebrahimi VmaAllocationCreateInfo allocCreateInfo = {};
1120*b7893ccfSSadaf Ebrahimi allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1121*b7893ccfSSadaf Ebrahimi allocCreateInfo.pUserData = pMetadata;
1122*b7893ccfSSadaf Ebrahimi
1123*b7893ccfSSadaf Ebrahimi VkBuffer buffer;
1124*b7893ccfSSadaf Ebrahimi VmaAllocation allocation;
1125*b7893ccfSSadaf Ebrahimi vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
1126*b7893ccfSSadaf Ebrahimi \endcode
1127*b7893ccfSSadaf Ebrahimi
1128*b7893ccfSSadaf Ebrahimi The pointer may be later retrieved as VmaAllocationInfo::pUserData:
1129*b7893ccfSSadaf Ebrahimi
1130*b7893ccfSSadaf Ebrahimi \code
1131*b7893ccfSSadaf Ebrahimi VmaAllocationInfo allocInfo;
1132*b7893ccfSSadaf Ebrahimi vmaGetAllocationInfo(allocator, allocation, &allocInfo);
1133*b7893ccfSSadaf Ebrahimi MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
1134*b7893ccfSSadaf Ebrahimi \endcode
1135*b7893ccfSSadaf Ebrahimi
1136*b7893ccfSSadaf Ebrahimi It can also be changed using function vmaSetAllocationUserData().
1137*b7893ccfSSadaf Ebrahimi
1138*b7893ccfSSadaf Ebrahimi Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
1139*b7893ccfSSadaf Ebrahimi vmaBuildStatsString(), in hexadecimal form.
1140*b7893ccfSSadaf Ebrahimi
1141*b7893ccfSSadaf Ebrahimi \section allocation_names Allocation names
1142*b7893ccfSSadaf Ebrahimi
There is an alternative mode available where the `pUserData` pointer is used to point to
a null-terminated string, giving a name to the allocation. To use this mode,
set the #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags.
Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or as the argument to
vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string.
The library creates an internal copy of the string, so the pointer you pass doesn't need
to be valid for the whole lifetime of the allocation. You can free it after the call.
1150*b7893ccfSSadaf Ebrahimi
1151*b7893ccfSSadaf Ebrahimi \code
1152*b7893ccfSSadaf Ebrahimi VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
1153*b7893ccfSSadaf Ebrahimi // Fill imageInfo...
1154*b7893ccfSSadaf Ebrahimi
1155*b7893ccfSSadaf Ebrahimi std::string imageName = "Texture: ";
1156*b7893ccfSSadaf Ebrahimi imageName += fileName;
1157*b7893ccfSSadaf Ebrahimi
1158*b7893ccfSSadaf Ebrahimi VmaAllocationCreateInfo allocCreateInfo = {};
1159*b7893ccfSSadaf Ebrahimi allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1160*b7893ccfSSadaf Ebrahimi allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
1161*b7893ccfSSadaf Ebrahimi allocCreateInfo.pUserData = imageName.c_str();
1162*b7893ccfSSadaf Ebrahimi
1163*b7893ccfSSadaf Ebrahimi VkImage image;
1164*b7893ccfSSadaf Ebrahimi VmaAllocation allocation;
1165*b7893ccfSSadaf Ebrahimi vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
1166*b7893ccfSSadaf Ebrahimi \endcode
1167*b7893ccfSSadaf Ebrahimi
The value of the allocation's `pUserData` pointer will be different from the one
you passed when setting the allocation's name - it points to an internally managed
buffer that holds a copy of the string.
1171*b7893ccfSSadaf Ebrahimi
1172*b7893ccfSSadaf Ebrahimi \code
1173*b7893ccfSSadaf Ebrahimi VmaAllocationInfo allocInfo;
1174*b7893ccfSSadaf Ebrahimi vmaGetAllocationInfo(allocator, allocation, &allocInfo);
1175*b7893ccfSSadaf Ebrahimi const char* imageName = (const char*)allocInfo.pUserData;
1176*b7893ccfSSadaf Ebrahimi printf("Image name: %s\n", imageName);
1177*b7893ccfSSadaf Ebrahimi \endcode
1178*b7893ccfSSadaf Ebrahimi
1179*b7893ccfSSadaf Ebrahimi That string is also printed in JSON report created by vmaBuildStatsString().
1180*b7893ccfSSadaf Ebrahimi
1181*b7893ccfSSadaf Ebrahimi
1182*b7893ccfSSadaf Ebrahimi \page debugging_memory_usage Debugging incorrect memory usage
1183*b7893ccfSSadaf Ebrahimi
1184*b7893ccfSSadaf Ebrahimi If you suspect a bug with memory usage, like usage of uninitialized memory or
1185*b7893ccfSSadaf Ebrahimi memory being overwritten out of bounds of an allocation,
1186*b7893ccfSSadaf Ebrahimi you can use debug features of this library to verify this.
1187*b7893ccfSSadaf Ebrahimi
1188*b7893ccfSSadaf Ebrahimi \section debugging_memory_usage_initialization Memory initialization
1189*b7893ccfSSadaf Ebrahimi
1190*b7893ccfSSadaf Ebrahimi If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
1191*b7893ccfSSadaf Ebrahimi you can enable automatic memory initialization to verify this.
1192*b7893ccfSSadaf Ebrahimi To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
1193*b7893ccfSSadaf Ebrahimi
1194*b7893ccfSSadaf Ebrahimi \code
1195*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
1196*b7893ccfSSadaf Ebrahimi #include "vk_mem_alloc.h"
1197*b7893ccfSSadaf Ebrahimi \endcode
1198*b7893ccfSSadaf Ebrahimi
1199*b7893ccfSSadaf Ebrahimi It makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
1200*b7893ccfSSadaf Ebrahimi Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
1201*b7893ccfSSadaf Ebrahimi Memory is automatically mapped and unmapped if necessary.
1202*b7893ccfSSadaf Ebrahimi
If you find these values while debugging your program, chances are good that you incorrectly
read Vulkan memory that is allocated but not initialized, or already freed, respectively.
1205*b7893ccfSSadaf Ebrahimi
1206*b7893ccfSSadaf Ebrahimi Memory initialization works only with memory types that are `HOST_VISIBLE`.
1207*b7893ccfSSadaf Ebrahimi It works also with dedicated allocations.
1208*b7893ccfSSadaf Ebrahimi It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
1209*b7893ccfSSadaf Ebrahimi as they cannot be mapped.
1210*b7893ccfSSadaf Ebrahimi
1211*b7893ccfSSadaf Ebrahimi \section debugging_memory_usage_margins Margins
1212*b7893ccfSSadaf Ebrahimi
1213*b7893ccfSSadaf Ebrahimi By default, allocations are laid out in memory blocks next to each other if possible
1214*b7893ccfSSadaf Ebrahimi (considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
1215*b7893ccfSSadaf Ebrahimi
1217*b7893ccfSSadaf Ebrahimi
1218*b7893ccfSSadaf Ebrahimi Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
1219*b7893ccfSSadaf Ebrahimi number of bytes as a margin before and after every allocation.
1220*b7893ccfSSadaf Ebrahimi
1221*b7893ccfSSadaf Ebrahimi \code
1222*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_MARGIN 16
1223*b7893ccfSSadaf Ebrahimi #include "vk_mem_alloc.h"
1224*b7893ccfSSadaf Ebrahimi \endcode
1225*b7893ccfSSadaf Ebrahimi
1227*b7893ccfSSadaf Ebrahimi
1228*b7893ccfSSadaf Ebrahimi If your bug goes away after enabling margins, it means it may be caused by memory
1229*b7893ccfSSadaf Ebrahimi being overwritten outside of allocation boundaries. It is not 100% certain though.
1230*b7893ccfSSadaf Ebrahimi Change in application behavior may also be caused by different order and distribution
1231*b7893ccfSSadaf Ebrahimi of allocations across memory blocks after margins are applied.
1232*b7893ccfSSadaf Ebrahimi
The margin is also applied before the first and after the last allocation in a block.
Between two adjacent allocations the margin occurs only once.
1235*b7893ccfSSadaf Ebrahimi
1236*b7893ccfSSadaf Ebrahimi Margins work with all types of memory.
1237*b7893ccfSSadaf Ebrahimi
The margin is applied only to allocations made out of memory blocks and not to dedicated
allocations, which have their own memory block of a specific size.
It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
or to those that the library automatically decided to put into dedicated allocations, e.g. due to their
large size or because it was recommended by the VK_KHR_dedicated_allocation extension.
Margins are also not active in custom pools created with the #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
1244*b7893ccfSSadaf Ebrahimi
1245*b7893ccfSSadaf Ebrahimi Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
1246*b7893ccfSSadaf Ebrahimi
1247*b7893ccfSSadaf Ebrahimi Note that enabling margins increases memory usage and fragmentation.
1248*b7893ccfSSadaf Ebrahimi
1249*b7893ccfSSadaf Ebrahimi \section debugging_memory_usage_corruption_detection Corruption detection
1250*b7893ccfSSadaf Ebrahimi
1251*b7893ccfSSadaf Ebrahimi You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
1252*b7893ccfSSadaf Ebrahimi of contents of the margins.
1253*b7893ccfSSadaf Ebrahimi
1254*b7893ccfSSadaf Ebrahimi \code
1255*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_MARGIN 16
1256*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_DETECT_CORRUPTION 1
1257*b7893ccfSSadaf Ebrahimi #include "vk_mem_alloc.h"
1258*b7893ccfSSadaf Ebrahimi \endcode
1259*b7893ccfSSadaf Ebrahimi
When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
(it must be a multiple of 4) before and after every allocation is filled with a magic number.
This idea is also known as a "canary".
Memory is automatically mapped and unmapped if necessary.
1264*b7893ccfSSadaf Ebrahimi
This number is validated automatically when the allocation is destroyed.
If it is not equal to the expected value, `VMA_ASSERT()` is executed.
It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
which indicates a serious bug.
1269*b7893ccfSSadaf Ebrahimi
1270*b7893ccfSSadaf Ebrahimi You can also explicitly request checking margins of all allocations in all memory blocks
1271*b7893ccfSSadaf Ebrahimi that belong to specified memory types by using function vmaCheckCorruption(),
1272*b7893ccfSSadaf Ebrahimi or in memory blocks that belong to specified custom pool, by using function
1273*b7893ccfSSadaf Ebrahimi vmaCheckPoolCorruption().
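
A minimal sketch (`pool` is an assumed #VmaPool; check the returned `VkResult` - `VK_SUCCESS` means no corruption was detected in the checked memory):

\code
// Check margins of all allocations in all memory types.
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);

// Or check only allocations in a specific custom pool:
res = vmaCheckPoolCorruption(allocator, pool);
\endcode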
1274*b7893ccfSSadaf Ebrahimi
1275*b7893ccfSSadaf Ebrahimi Margin validation (corruption detection) works only for memory types that are
1276*b7893ccfSSadaf Ebrahimi `HOST_VISIBLE` and `HOST_COHERENT`.
1277*b7893ccfSSadaf Ebrahimi
1278*b7893ccfSSadaf Ebrahimi
1279*b7893ccfSSadaf Ebrahimi \page record_and_replay Record and replay
1280*b7893ccfSSadaf Ebrahimi
1281*b7893ccfSSadaf Ebrahimi \section record_and_replay_introduction Introduction
1282*b7893ccfSSadaf Ebrahimi
While using the library, a sequence of calls to its functions together with their
parameters can be recorded to a file and later replayed using a standalone player
application. This can be useful to:
1286*b7893ccfSSadaf Ebrahimi
1287*b7893ccfSSadaf Ebrahimi - Test correctness - check if same sequence of calls will not cause crash or
1288*b7893ccfSSadaf Ebrahimi failures on a target platform.
1289*b7893ccfSSadaf Ebrahimi - Gather statistics - see number of allocations, peak memory usage, number of
1290*b7893ccfSSadaf Ebrahimi calls etc.
1291*b7893ccfSSadaf Ebrahimi - Benchmark performance - see how much time it takes to replay the whole
1292*b7893ccfSSadaf Ebrahimi sequence.
1293*b7893ccfSSadaf Ebrahimi
1294*b7893ccfSSadaf Ebrahimi \section record_and_replay_usage Usage
1295*b7893ccfSSadaf Ebrahimi
<b>To record sequence of calls to a file:</b> Fill in the
VmaAllocatorCreateInfo::pRecordSettings member while creating the #VmaAllocator
object. The file is opened and written during the whole lifetime of the allocator.
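
A minimal sketch (recording must also be compiled in via the `VMA_RECORDING_ENABLED` macro; the file path is just an example):

\code
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Optional - flush the file after every call.
recordSettings.pFilePath = "MyRecording.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill other members ...
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode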
1299*b7893ccfSSadaf Ebrahimi
1300*b7893ccfSSadaf Ebrahimi <b>To replay file:</b> Use VmaReplay - standalone command-line program.
1301*b7893ccfSSadaf Ebrahimi Precompiled binary can be found in "bin" directory.
1302*b7893ccfSSadaf Ebrahimi Its source can be found in "src/VmaReplay" directory.
1303*b7893ccfSSadaf Ebrahimi Its project is generated by Premake.
1304*b7893ccfSSadaf Ebrahimi Command line syntax is printed when the program is launched without parameters.
1305*b7893ccfSSadaf Ebrahimi Basic usage:
1306*b7893ccfSSadaf Ebrahimi
1307*b7893ccfSSadaf Ebrahimi VmaReplay.exe MyRecording.csv
1308*b7893ccfSSadaf Ebrahimi
1309*b7893ccfSSadaf Ebrahimi <b>Documentation of file format</b> can be found in file: "docs/Recording file format.md".
It's a human-readable text file in CSV format (Comma Separated Values).
1311*b7893ccfSSadaf Ebrahimi
1312*b7893ccfSSadaf Ebrahimi \section record_and_replay_additional_considerations Additional considerations
1313*b7893ccfSSadaf Ebrahimi
1314*b7893ccfSSadaf Ebrahimi - Replaying file that was recorded on a different GPU (with different parameters
1315*b7893ccfSSadaf Ebrahimi like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially different
1316*b7893ccfSSadaf Ebrahimi set of memory heaps and types) may give different performance and memory usage
1317*b7893ccfSSadaf Ebrahimi results, as well as issue some warnings and errors.
1318*b7893ccfSSadaf Ebrahimi - Current implementation of recording in VMA, as well as VmaReplay application, is
1319*b7893ccfSSadaf Ebrahimi coded and tested only on Windows. Inclusion of recording code is driven by
1320*b7893ccfSSadaf Ebrahimi `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to
1321*b7893ccfSSadaf Ebrahimi add. Contributions are welcomed.
1322*b7893ccfSSadaf Ebrahimi - Currently calls to vmaDefragment() function are not recorded.
1323*b7893ccfSSadaf Ebrahimi
1324*b7893ccfSSadaf Ebrahimi
1325*b7893ccfSSadaf Ebrahimi \page usage_patterns Recommended usage patterns
1326*b7893ccfSSadaf Ebrahimi
1327*b7893ccfSSadaf Ebrahimi See also slides from talk:
1328*b7893ccfSSadaf Ebrahimi [Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
1329*b7893ccfSSadaf Ebrahimi
1330*b7893ccfSSadaf Ebrahimi
1331*b7893ccfSSadaf Ebrahimi \section usage_patterns_simple Simple patterns
1332*b7893ccfSSadaf Ebrahimi
1333*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_simple_render_targets Render targets
1334*b7893ccfSSadaf Ebrahimi
1335*b7893ccfSSadaf Ebrahimi <b>When:</b>
1336*b7893ccfSSadaf Ebrahimi Any resources that you frequently write and read on GPU,
1337*b7893ccfSSadaf Ebrahimi e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
1338*b7893ccfSSadaf Ebrahimi images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
1339*b7893ccfSSadaf Ebrahimi
1340*b7893ccfSSadaf Ebrahimi <b>What to do:</b>
1341*b7893ccfSSadaf Ebrahimi Create them in video memory that is fastest to access from GPU using
1342*b7893ccfSSadaf Ebrahimi #VMA_MEMORY_USAGE_GPU_ONLY.
1343*b7893ccfSSadaf Ebrahimi
1344*b7893ccfSSadaf Ebrahimi Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
1345*b7893ccfSSadaf Ebrahimi and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
1346*b7893ccfSSadaf Ebrahimi especially if they are large or if you plan to destroy and recreate them e.g. when
1347*b7893ccfSSadaf Ebrahimi display resolution changes.
1348*b7893ccfSSadaf Ebrahimi Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
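
A minimal sketch of creating a color render target this way (the format, extent, and usage flags are illustrative only):

\code
VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent = { 1920, 1080, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkImage image;
VmaAllocation allocation;
vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
\endcode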
1349*b7893ccfSSadaf Ebrahimi
1350*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_simple_immutable_resources Immutable resources
1351*b7893ccfSSadaf Ebrahimi
1352*b7893ccfSSadaf Ebrahimi <b>When:</b>
1353*b7893ccfSSadaf Ebrahimi Any resources that you fill on CPU only once (aka "immutable") or infrequently
1354*b7893ccfSSadaf Ebrahimi and then read frequently on GPU,
1355*b7893ccfSSadaf Ebrahimi e.g. textures, vertex and index buffers, constant buffers that don't change often.
1356*b7893ccfSSadaf Ebrahimi
1357*b7893ccfSSadaf Ebrahimi <b>What to do:</b>
1358*b7893ccfSSadaf Ebrahimi Create them in video memory that is fastest to access from GPU using
1359*b7893ccfSSadaf Ebrahimi #VMA_MEMORY_USAGE_GPU_ONLY.
1360*b7893ccfSSadaf Ebrahimi
To initialize the content of such a resource, create a CPU-side (aka "staging") copy of it
1362*b7893ccfSSadaf Ebrahimi in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
1363*b7893ccfSSadaf Ebrahimi and submit a transfer from it to the GPU resource.
1364*b7893ccfSSadaf Ebrahimi You can keep the staging copy if you need it for another upload transfer in the future.
1365*b7893ccfSSadaf Ebrahimi If you don't, you can destroy it or reuse this buffer for uploading different resource
1366*b7893ccfSSadaf Ebrahimi after the transfer finishes.
1367*b7893ccfSSadaf Ebrahimi
1368*b7893ccfSSadaf Ebrahimi Prefer to create just buffers in system memory rather than images, even for uploading textures.
1369*b7893ccfSSadaf Ebrahimi Use `vkCmdCopyBufferToImage()`.
Don't use images with `VK_IMAGE_TILING_LINEAR`.
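
A minimal sketch of this staging pattern (`myData`, `myDataSize` and `commandBuffer` are assumed to be provided by your code):

\code
// Create the GPU-side buffer, e.g. a vertex buffer.
VkBufferCreateInfo gpuBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
gpuBufInfo.size = myDataSize;
gpuBufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo gpuAllocCreateInfo = {};
gpuAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer gpuBuf;
VmaAllocation gpuAlloc;
vmaCreateBuffer(allocator, &gpuBufInfo, &gpuAllocCreateInfo, &gpuBuf, &gpuAlloc, nullptr);

// Create the staging buffer in system memory and fill it on CPU.
VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
stagingBufInfo.size = myDataSize;
stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo stagingAllocCreateInfo = {};
stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

VkBuffer stagingBuf;
VmaAllocation stagingAlloc;
vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocCreateInfo, &stagingBuf, &stagingAlloc, nullptr);

void* mappedData;
vmaMapMemory(allocator, stagingAlloc, &mappedData);
memcpy(mappedData, myData, myDataSize);
vmaUnmapMemory(allocator, stagingAlloc);

// Record the transfer; submit the command buffer and destroy the staging
// buffer only after the transfer has finished executing.
VkBufferCopy copyRegion = { 0, 0, myDataSize };
vkCmdCopyBuffer(commandBuffer, stagingBuf, gpuBuf, 1, &copyRegion);
\endcode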
1371*b7893ccfSSadaf Ebrahimi
1372*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_dynamic_resources Dynamic resources
1373*b7893ccfSSadaf Ebrahimi
1374*b7893ccfSSadaf Ebrahimi <b>When:</b>
1375*b7893ccfSSadaf Ebrahimi Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
1376*b7893ccfSSadaf Ebrahimi written on CPU, read on GPU.
1377*b7893ccfSSadaf Ebrahimi
1378*b7893ccfSSadaf Ebrahimi <b>What to do:</b>
Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
You can map such a resource and write to it directly on CPU, as well as read from it on GPU.
1381*b7893ccfSSadaf Ebrahimi
1382*b7893ccfSSadaf Ebrahimi This is a more complex situation. Different solutions are possible,
1383*b7893ccfSSadaf Ebrahimi and the best one depends on specific GPU type, but you can use this simple approach for the start.
1384*b7893ccfSSadaf Ebrahimi Prefer to write to such resource sequentially (e.g. using `memcpy`).
1385*b7893ccfSSadaf Ebrahimi Don't perform random access or any reads from it on CPU, as it may be very slow.
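
A minimal sketch of a per-frame uniform buffer created this way and kept persistently mapped (`MyConstants` / `myConstants` are assumed to be defined by your code):

\code
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = sizeof(MyConstants);
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // Keep it persistently mapped.

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

// Every frame: write new data sequentially through the persistent mapping.
memcpy(allocInfo.pMappedData, &myConstants, sizeof(MyConstants));
\endcode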
1386*b7893ccfSSadaf Ebrahimi
1387*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_readback Readback
1388*b7893ccfSSadaf Ebrahimi
1389*b7893ccfSSadaf Ebrahimi <b>When:</b>
1390*b7893ccfSSadaf Ebrahimi Resources that contain data written by GPU that you want to read back on CPU,
1391*b7893ccfSSadaf Ebrahimi e.g. results of some computations.
1392*b7893ccfSSadaf Ebrahimi
1393*b7893ccfSSadaf Ebrahimi <b>What to do:</b>
1394*b7893ccfSSadaf Ebrahimi Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
1395*b7893ccfSSadaf Ebrahimi You can write to them directly on GPU, as well as map and read them on CPU.
1396*b7893ccfSSadaf Ebrahimi
1397*b7893ccfSSadaf Ebrahimi \section usage_patterns_advanced Advanced patterns
1398*b7893ccfSSadaf Ebrahimi
1399*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_integrated_graphics Detecting integrated graphics
1400*b7893ccfSSadaf Ebrahimi
1401*b7893ccfSSadaf Ebrahimi You can support integrated graphics (like Intel HD Graphics, AMD APU) better
1402*b7893ccfSSadaf Ebrahimi by detecting it in Vulkan.
1403*b7893ccfSSadaf Ebrahimi To do it, call `vkGetPhysicalDeviceProperties()`, inspect
1404*b7893ccfSSadaf Ebrahimi `VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
1405*b7893ccfSSadaf Ebrahimi When you find it, you can assume that memory is unified and all memory types are comparably fast
1406*b7893ccfSSadaf Ebrahimi to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
1407*b7893ccfSSadaf Ebrahimi
1408*b7893ccfSSadaf Ebrahimi You can then sum up sizes of all available memory heaps and treat them as useful for
1409*b7893ccfSSadaf Ebrahimi your GPU resources, instead of only `DEVICE_LOCAL` ones.
1410*b7893ccfSSadaf Ebrahimi You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
1411*b7893ccfSSadaf Ebrahimi directly instead of submitting explicit transfer (see below).
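
A minimal detection sketch (`physicalDevice` is assumed to be the `VkPhysicalDevice` you use):

\code
VkPhysicalDeviceProperties props;
vkGetPhysicalDeviceProperties(physicalDevice, &props);

if(props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU)
{
    // Unified memory: prefer HOST_VISIBLE memory types and direct mapping
    // instead of staging copies and explicit transfers.
}
\endcode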
1412*b7893ccfSSadaf Ebrahimi
1413*b7893ccfSSadaf Ebrahimi \subsection usage_patterns_direct_vs_transfer Direct access versus transfer
1414*b7893ccfSSadaf Ebrahimi
1415*b7893ccfSSadaf Ebrahimi For resources that you frequently write on CPU and read on GPU, many solutions are possible:
1416*b7893ccfSSadaf Ebrahimi
-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
a second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY, and submit an explicit transfer each time.
-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
and read it directly on GPU.
-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
and read it directly on GPU.
1423*b7893ccfSSadaf Ebrahimi
1424*b7893ccfSSadaf Ebrahimi Which solution is the most efficient depends on your resource and especially on the GPU.
1425*b7893ccfSSadaf Ebrahimi It is best to measure it and then make the decision.
1426*b7893ccfSSadaf Ebrahimi Some general recommendations:
1427*b7893ccfSSadaf Ebrahimi
- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
related to using a second copy and performing a transfer.
1430*b7893ccfSSadaf Ebrahimi - For small resources (e.g. constant buffers) use (2).
1431*b7893ccfSSadaf Ebrahimi Discrete AMD cards have special 256 MiB pool of video memory that is directly mappable.
1432*b7893ccfSSadaf Ebrahimi Even if the resource ends up in system memory, its data may be cached on GPU after first
1433*b7893ccfSSadaf Ebrahimi fetch over PCIe bus.
1434*b7893ccfSSadaf Ebrahimi - For larger resources (e.g. textures), decide between (1) and (2).
1435*b7893ccfSSadaf Ebrahimi You may want to differentiate NVIDIA and AMD, e.g. by looking for memory type that is
1436*b7893ccfSSadaf Ebrahimi both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
1437*b7893ccfSSadaf Ebrahimi
1438*b7893ccfSSadaf Ebrahimi Similarly, for resources that you frequently write on GPU and read on CPU, multiple
1439*b7893ccfSSadaf Ebrahimi solutions are possible:
1440*b7893ccfSSadaf Ebrahimi
-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
a second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU, and submit an explicit transfer each time.
-# Create just a single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
map it and read it on CPU.
1445*b7893ccfSSadaf Ebrahimi
1446*b7893ccfSSadaf Ebrahimi You should take some measurements to decide which option is faster in case of your specific
1447*b7893ccfSSadaf Ebrahimi resource.
1448*b7893ccfSSadaf Ebrahimi
If you don't want to specialize your code for specific types of GPUs, you can still make
a simple optimization for cases when your resource ends up in mappable memory: use it
directly in that case instead of creating a CPU-side staging copy.
1452*b7893ccfSSadaf Ebrahimi For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
1453*b7893ccfSSadaf Ebrahimi
1454*b7893ccfSSadaf Ebrahimi
1455*b7893ccfSSadaf Ebrahimi \page configuration Configuration
1456*b7893ccfSSadaf Ebrahimi
1457*b7893ccfSSadaf Ebrahimi Please check "CONFIGURATION SECTION" in the code to find macros that you can define
1458*b7893ccfSSadaf Ebrahimi before each include of this file or change directly in this file to provide
1459*b7893ccfSSadaf Ebrahimi your own implementation of basic facilities like assert, `min()` and `max()` functions,
1460*b7893ccfSSadaf Ebrahimi mutex, atomic etc.
1461*b7893ccfSSadaf Ebrahimi The library uses its own implementation of containers by default, but you can switch to using
1462*b7893ccfSSadaf Ebrahimi STL containers instead.
1463*b7893ccfSSadaf Ebrahimi
1464*b7893ccfSSadaf Ebrahimi \section config_Vulkan_functions Pointers to Vulkan functions
1465*b7893ccfSSadaf Ebrahimi
1466*b7893ccfSSadaf Ebrahimi The library uses Vulkan functions straight from the `vulkan.h` header by default.
1467*b7893ccfSSadaf Ebrahimi If you want to provide your own pointers to these functions, e.g. fetched using
1468*b7893ccfSSadaf Ebrahimi `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`:
1469*b7893ccfSSadaf Ebrahimi
1470*b7893ccfSSadaf Ebrahimi -# Define `VMA_STATIC_VULKAN_FUNCTIONS 0`.
1471*b7893ccfSSadaf Ebrahimi -# Provide valid pointers through VmaAllocatorCreateInfo::pVulkanFunctions.
1472*b7893ccfSSadaf Ebrahimi
1473*b7893ccfSSadaf Ebrahimi \section custom_memory_allocator Custom host memory allocator
1474*b7893ccfSSadaf Ebrahimi
If you use a custom allocator for CPU memory rather than the default C++ operator `new`
and `delete`, you can make this library use your allocator as well
by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
1478*b7893ccfSSadaf Ebrahimi functions will be passed to Vulkan, as well as used by the library itself to
1479*b7893ccfSSadaf Ebrahimi make any CPU-side allocations.
1480*b7893ccfSSadaf Ebrahimi
1481*b7893ccfSSadaf Ebrahimi \section allocation_callbacks Device memory allocation callbacks
1482*b7893ccfSSadaf Ebrahimi
1483*b7893ccfSSadaf Ebrahimi The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
You can set up callbacks to be informed about these calls, e.g. for the purpose
of gathering some statistics. To do it, fill the optional member
1486*b7893ccfSSadaf Ebrahimi VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
1487*b7893ccfSSadaf Ebrahimi
1488*b7893ccfSSadaf Ebrahimi \section heap_memory_limit Device heap memory limit
1489*b7893ccfSSadaf Ebrahimi
If you want to test how your program behaves with a limited amount of Vulkan device
memory available, without switching to a graphics card that really has
smaller VRAM, you can use a feature of this library intended for this purpose.
To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
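
A minimal sketch (which heap to limit and the 256 MiB value are illustrative only):

\code
VkPhysicalDeviceMemoryProperties memProps;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);

// One entry per memory heap; VK_WHOLE_SIZE means no limit on that heap.
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE;
heapSizeLimit[0] = 256ull * 1024 * 1024; // Pretend heap 0 has only 256 MiB.

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill other members ...
allocatorInfo.pHeapSizeLimit = heapSizeLimit;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode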
1494*b7893ccfSSadaf Ebrahimi
1495*b7893ccfSSadaf Ebrahimi
1496*b7893ccfSSadaf Ebrahimi
1497*b7893ccfSSadaf Ebrahimi \page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
1498*b7893ccfSSadaf Ebrahimi
VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
performance on some GPUs. It augments the Vulkan API with the possibility to query the
driver whether it prefers a particular buffer or image to have its own, dedicated
allocation (a separate `VkDeviceMemory` block) for better efficiency - to be able
to do some internal optimizations.
1504*b7893ccfSSadaf Ebrahimi
1505*b7893ccfSSadaf Ebrahimi The extension is supported by this library. It will be used automatically when
1506*b7893ccfSSadaf Ebrahimi enabled. To enable it:
1507*b7893ccfSSadaf Ebrahimi
1 . When creating the Vulkan device, check if the following 2 device extensions are
supported (call `vkEnumerateDeviceExtensionProperties()`).
1510*b7893ccfSSadaf Ebrahimi If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
1511*b7893ccfSSadaf Ebrahimi
1512*b7893ccfSSadaf Ebrahimi - VK_KHR_get_memory_requirements2
1513*b7893ccfSSadaf Ebrahimi - VK_KHR_dedicated_allocation
1514*b7893ccfSSadaf Ebrahimi
1515*b7893ccfSSadaf Ebrahimi If you enabled these extensions:
1516*b7893ccfSSadaf Ebrahimi
2 . Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
your #VmaAllocator object to inform the library that you enabled the required extensions
and you want the library to use them.
1520*b7893ccfSSadaf Ebrahimi
1521*b7893ccfSSadaf Ebrahimi \code
1522*b7893ccfSSadaf Ebrahimi allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
1523*b7893ccfSSadaf Ebrahimi
1524*b7893ccfSSadaf Ebrahimi vmaCreateAllocator(&allocatorInfo, &allocator);
1525*b7893ccfSSadaf Ebrahimi \endcode
1526*b7893ccfSSadaf Ebrahimi
1527*b7893ccfSSadaf Ebrahimi That's all. The extension will be automatically used whenever you create a
1528*b7893ccfSSadaf Ebrahimi buffer using vmaCreateBuffer() or image using vmaCreateImage().
1529*b7893ccfSSadaf Ebrahimi
1530*b7893ccfSSadaf Ebrahimi When using the extension together with Vulkan Validation Layer, you will receive
1531*b7893ccfSSadaf Ebrahimi warnings like this:
1532*b7893ccfSSadaf Ebrahimi
1533*b7893ccfSSadaf Ebrahimi vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
1534*b7893ccfSSadaf Ebrahimi
It is OK - you should just ignore it. It happens because the library uses the function
`vkGetBufferMemoryRequirements2KHR()` instead of the standard
`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
unaware of it.
1539*b7893ccfSSadaf Ebrahimi
1540*b7893ccfSSadaf Ebrahimi To learn more about this extension, see:
1541*b7893ccfSSadaf Ebrahimi
1542*b7893ccfSSadaf Ebrahimi - [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
1543*b7893ccfSSadaf Ebrahimi - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
1544*b7893ccfSSadaf Ebrahimi
1545*b7893ccfSSadaf Ebrahimi
1546*b7893ccfSSadaf Ebrahimi
1547*b7893ccfSSadaf Ebrahimi \page general_considerations General considerations
1548*b7893ccfSSadaf Ebrahimi
1549*b7893ccfSSadaf Ebrahimi \section general_considerations_thread_safety Thread safety
1550*b7893ccfSSadaf Ebrahimi
1551*b7893ccfSSadaf Ebrahimi - The library has no global state, so separate #VmaAllocator objects can be used
1552*b7893ccfSSadaf Ebrahimi independently.
1553*b7893ccfSSadaf Ebrahimi There should be no need to create multiple such objects though - one per `VkDevice` is enough.
1554*b7893ccfSSadaf Ebrahimi - By default, all calls to functions that take #VmaAllocator as first parameter
1555*b7893ccfSSadaf Ebrahimi are safe to call from multiple threads simultaneously because they are
1556*b7893ccfSSadaf Ebrahimi synchronized internally when needed.
1557*b7893ccfSSadaf Ebrahimi - When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
1558*b7893ccfSSadaf Ebrahimi flag, calls to functions that take such #VmaAllocator object must be
1559*b7893ccfSSadaf Ebrahimi synchronized externally.
1560*b7893ccfSSadaf Ebrahimi - Access to a #VmaAllocation object must be externally synchronized. For example,
1561*b7893ccfSSadaf Ebrahimi you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
1562*b7893ccfSSadaf Ebrahimi threads at the same time if you pass the same #VmaAllocation object to these
1563*b7893ccfSSadaf Ebrahimi functions.
1564*b7893ccfSSadaf Ebrahimi
1565*b7893ccfSSadaf Ebrahimi \section general_considerations_validation_layer_warnings Validation layer warnings
1566*b7893ccfSSadaf Ebrahimi
When using this library, you can meet the following types of warnings issued by
the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
to just ignore them.
1570*b7893ccfSSadaf Ebrahimi
1571*b7893ccfSSadaf Ebrahimi - *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
1572*b7893ccfSSadaf Ebrahimi - It happens when VK_KHR_dedicated_allocation extension is enabled.
1573*b7893ccfSSadaf Ebrahimi `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
1574*b7893ccfSSadaf Ebrahimi - *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
1575*b7893ccfSSadaf Ebrahimi - It happens when you map a buffer or image, because the library maps entire
1576*b7893ccfSSadaf Ebrahimi `VkDeviceMemory` block, where different types of images and buffers may end
1577*b7893ccfSSadaf Ebrahimi up together, especially on GPUs with unified memory like Intel.
1578*b7893ccfSSadaf Ebrahimi - *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
1579*b7893ccfSSadaf Ebrahimi - It happens when you use lost allocations, and a new image or buffer is
created in place of an existing object that became lost.
1581*b7893ccfSSadaf Ebrahimi - It may happen also when you use [defragmentation](@ref defragmentation).
1582*b7893ccfSSadaf Ebrahimi
1583*b7893ccfSSadaf Ebrahimi \section general_considerations_allocation_algorithm Allocation algorithm
1584*b7893ccfSSadaf Ebrahimi
1585*b7893ccfSSadaf Ebrahimi The library uses the following algorithm for allocation, in order (see also the sketch after this list):
1586*b7893ccfSSadaf Ebrahimi
1587*b7893ccfSSadaf Ebrahimi -# Try to find a free range of memory in existing blocks.
1588*b7893ccfSSadaf Ebrahimi -# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
1589*b7893ccfSSadaf Ebrahimi -# If failed, try to create such a block with size/2, size/4, size/8.
1590*b7893ccfSSadaf Ebrahimi -# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
1591*b7893ccfSSadaf Ebrahimi    specified, try to find space in existing blocks, possibly making some other
1592*b7893ccfSSadaf Ebrahimi allocations lost.
1593*b7893ccfSSadaf Ebrahimi -# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
1594*b7893ccfSSadaf Ebrahimi just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
1595*b7893ccfSSadaf Ebrahimi -# If failed, choose another memory type that meets the requirements specified in
1596*b7893ccfSSadaf Ebrahimi    VmaAllocationCreateInfo and go back to step 1.
1597*b7893ccfSSadaf Ebrahimi -# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
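
The sketch below is a simplified, hypothetical illustration of this fallback order. It is
not the library's actual implementation; the callable parameters merely stand in for the
internal steps:

\code
#include <functional>

// Simplified illustration of the allocation fallback order described above.
VkResult AllocateSketch(
    VkDeviceSize size,
    VkDeviceSize preferredBlockSize,
    bool canMakeOtherLost,
    const std::function<bool(VkDeviceSize)>& tryFindFreeRangeInExistingBlocks,
    const std::function<bool(VkDeviceSize)>& tryCreateNewBlock,
    const std::function<bool(VkDeviceSize)>& tryMakeOtherAllocationsLost,
    const std::function<bool(VkDeviceSize)>& tryDedicatedAllocation,
    const std::function<bool()>& switchToNextAcceptableMemoryType)
{
    for(;;) // One iteration per candidate memory type.
    {
        // 1. Free range in an existing block.
        if(tryFindFreeRangeInExistingBlocks(size))
            return VK_SUCCESS;
        // 2.-3. New block with preferred size, then size/2, size/4, size/8.
        for(uint32_t shift = 0; shift <= 3; ++shift)
            if(tryCreateNewBlock(preferredBlockSize >> shift))
                return VK_SUCCESS;
        // 4. Make other allocations lost, if the flag allows it.
        if(canMakeOtherLost && tryMakeOtherAllocationsLost(size))
            return VK_SUCCESS;
        // 5. Dedicated VkDeviceMemory just for this allocation.
        if(tryDedicatedAllocation(size))
            return VK_SUCCESS;
        // 6. Another acceptable memory type, or give up.
        if(!switchToNextAcceptableMemoryType())
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
}
\endcode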
1598*b7893ccfSSadaf Ebrahimi
1599*b7893ccfSSadaf Ebrahimi \section general_considerations_features_not_supported Features not supported
1600*b7893ccfSSadaf Ebrahimi
1601*b7893ccfSSadaf Ebrahimi Features deliberately excluded from the scope of this library:
1602*b7893ccfSSadaf Ebrahimi
1603*b7893ccfSSadaf Ebrahimi - Data transfer. Uploading (streaming) and downloading data of buffers and images
1604*b7893ccfSSadaf Ebrahimi   between CPU and GPU memory, and the related synchronization, is the responsibility of the user.
1605*b7893ccfSSadaf Ebrahimi - Allocations for imported/exported external memory. They tend to require
1606*b7893ccfSSadaf Ebrahimi explicit memory type index and dedicated allocation anyway, so they don't
1607*b7893ccfSSadaf Ebrahimi interact with main features of this library. Such special purpose allocations
1608*b7893ccfSSadaf Ebrahimi should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
1609*b7893ccfSSadaf Ebrahimi - Recreation of buffers and images. Although the library has functions for
1610*b7893ccfSSadaf Ebrahimi buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
1611*b7893ccfSSadaf Ebrahimi recreate these objects yourself after defragmentation. That's because the big
1612*b7893ccfSSadaf Ebrahimi structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
1613*b7893ccfSSadaf Ebrahimi #VmaAllocation object.
1614*b7893ccfSSadaf Ebrahimi - Handling CPU memory allocation failures. When dynamically creating small C++
1615*b7893ccfSSadaf Ebrahimi objects in CPU memory (not Vulkan memory), allocation failures are not checked
1616*b7893ccfSSadaf Ebrahimi and handled gracefully, because that would complicate code significantly and
1617*b7893ccfSSadaf Ebrahimi is usually not needed in desktop PC applications anyway.
1618*b7893ccfSSadaf Ebrahimi - Code free of any compiler warnings. Maintaining the library to compile and
1619*b7893ccfSSadaf Ebrahimi work correctly on so many different platforms is hard enough. Being free of
1620*b7893ccfSSadaf Ebrahimi any warnings, on any version of any compiler, is simply not feasible.
1621*b7893ccfSSadaf Ebrahimi - This is a C++ library with C interface.
1622*b7893ccfSSadaf Ebrahimi Bindings or ports to any other programming languages are welcomed as external projects and
1623*b7893ccfSSadaf Ebrahimi are not going to be included into this repository.
1624*b7893ccfSSadaf Ebrahimi
1625*b7893ccfSSadaf Ebrahimi */
1626*b7893ccfSSadaf Ebrahimi
1627*b7893ccfSSadaf Ebrahimi /*
1628*b7893ccfSSadaf Ebrahimi Define this macro to 0/1 to disable/enable support for recording functionality,
1629*b7893ccfSSadaf Ebrahimi available through VmaAllocatorCreateInfo::pRecordSettings.
1630*b7893ccfSSadaf Ebrahimi */
1631*b7893ccfSSadaf Ebrahimi #ifndef VMA_RECORDING_ENABLED
1632*b7893ccfSSadaf Ebrahimi #ifdef _WIN32
1633*b7893ccfSSadaf Ebrahimi #define VMA_RECORDING_ENABLED 1
1634*b7893ccfSSadaf Ebrahimi #else
1635*b7893ccfSSadaf Ebrahimi #define VMA_RECORDING_ENABLED 0
1636*b7893ccfSSadaf Ebrahimi #endif
1637*b7893ccfSSadaf Ebrahimi #endif
1638*b7893ccfSSadaf Ebrahimi
1639*b7893ccfSSadaf Ebrahimi #ifndef NOMINMAX
1640*b7893ccfSSadaf Ebrahimi #define NOMINMAX // For windows.h
1641*b7893ccfSSadaf Ebrahimi #endif
1642*b7893ccfSSadaf Ebrahimi
1643*b7893ccfSSadaf Ebrahimi #ifndef VULKAN_H_
1644*b7893ccfSSadaf Ebrahimi #include <vulkan/vulkan.h>
1645*b7893ccfSSadaf Ebrahimi #endif
1646*b7893ccfSSadaf Ebrahimi
1647*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
1648*b7893ccfSSadaf Ebrahimi #include <windows.h>
1649*b7893ccfSSadaf Ebrahimi #endif
1650*b7893ccfSSadaf Ebrahimi
1651*b7893ccfSSadaf Ebrahimi #if !defined(VMA_DEDICATED_ALLOCATION)
1652*b7893ccfSSadaf Ebrahimi #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1653*b7893ccfSSadaf Ebrahimi #define VMA_DEDICATED_ALLOCATION 1
1654*b7893ccfSSadaf Ebrahimi #else
1655*b7893ccfSSadaf Ebrahimi #define VMA_DEDICATED_ALLOCATION 0
1656*b7893ccfSSadaf Ebrahimi #endif
1657*b7893ccfSSadaf Ebrahimi #endif
1658*b7893ccfSSadaf Ebrahimi
1659*b7893ccfSSadaf Ebrahimi /** \struct VmaAllocator
1660*b7893ccfSSadaf Ebrahimi \brief Represents the main, initialized object of this library.
1661*b7893ccfSSadaf Ebrahimi
1662*b7893ccfSSadaf Ebrahimi Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
1663*b7893ccfSSadaf Ebrahimi Call function vmaDestroyAllocator() to destroy it.
1664*b7893ccfSSadaf Ebrahimi
1665*b7893ccfSSadaf Ebrahimi It is recommended to create just one object of this type per `VkDevice` object,
1666*b7893ccfSSadaf Ebrahimi right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
1667*b7893ccfSSadaf Ebrahimi */
1668*b7893ccfSSadaf Ebrahimi VK_DEFINE_HANDLE(VmaAllocator)
1669*b7893ccfSSadaf Ebrahimi
1670*b7893ccfSSadaf Ebrahimi /// Callback function called after successful vkAllocateMemory.
1671*b7893ccfSSadaf Ebrahimi typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1672*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1673*b7893ccfSSadaf Ebrahimi uint32_t memoryType,
1674*b7893ccfSSadaf Ebrahimi VkDeviceMemory memory,
1675*b7893ccfSSadaf Ebrahimi VkDeviceSize size);
1676*b7893ccfSSadaf Ebrahimi /// Callback function called before vkFreeMemory.
1677*b7893ccfSSadaf Ebrahimi typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1678*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1679*b7893ccfSSadaf Ebrahimi uint32_t memoryType,
1680*b7893ccfSSadaf Ebrahimi VkDeviceMemory memory,
1681*b7893ccfSSadaf Ebrahimi VkDeviceSize size);
1682*b7893ccfSSadaf Ebrahimi
1683*b7893ccfSSadaf Ebrahimi /** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
1684*b7893ccfSSadaf Ebrahimi
1685*b7893ccfSSadaf Ebrahimi Provided for informative purposes, e.g. to gather statistics about the number of
1686*b7893ccfSSadaf Ebrahimi allocations or the total amount of memory allocated in Vulkan.
1687*b7893ccfSSadaf Ebrahimi
1688*b7893ccfSSadaf Ebrahimi Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
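
For example, a minimal sketch that counts live device memory allocations (the counter and
callback names are hypothetical, not part of the library):

\code
static uint32_t g_DeviceMemoryAllocCount = 0;

static void VKAPI_PTR MyAllocateCallback(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    ++g_DeviceMemoryAllocCount;
}

static void VKAPI_PTR MyFreeCallback(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    --g_DeviceMemoryAllocCount;
}

VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
deviceMemoryCallbacks.pfnFree = MyFreeCallback;
// Then set VmaAllocatorCreateInfo::pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
\endcode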
1689*b7893ccfSSadaf Ebrahimi */
1690*b7893ccfSSadaf Ebrahimi typedef struct VmaDeviceMemoryCallbacks {
1691*b7893ccfSSadaf Ebrahimi /// Optional, can be null.
1692*b7893ccfSSadaf Ebrahimi PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1693*b7893ccfSSadaf Ebrahimi /// Optional, can be null.
1694*b7893ccfSSadaf Ebrahimi PFN_vmaFreeDeviceMemoryFunction pfnFree;
1695*b7893ccfSSadaf Ebrahimi } VmaDeviceMemoryCallbacks;
1696*b7893ccfSSadaf Ebrahimi
1697*b7893ccfSSadaf Ebrahimi /// Flags for created #VmaAllocator.
1698*b7893ccfSSadaf Ebrahimi typedef enum VmaAllocatorCreateFlagBits {
1699*b7893ccfSSadaf Ebrahimi /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
1700*b7893ccfSSadaf Ebrahimi
1701*b7893ccfSSadaf Ebrahimi Using this flag may increase performance because internal mutexes are not used.
1702*b7893ccfSSadaf Ebrahimi */
1703*b7893ccfSSadaf Ebrahimi VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
1704*b7893ccfSSadaf Ebrahimi /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
1705*b7893ccfSSadaf Ebrahimi
1706*b7893ccfSSadaf Ebrahimi Using this extension will automatically allocate dedicated blocks of memory for
1707*b7893ccfSSadaf Ebrahimi some buffers and images instead of suballocating place for them out of bigger
1708*b7893ccfSSadaf Ebrahimi memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
1709*b7893ccfSSadaf Ebrahimi flag) when it is recommended by the driver. It may improve performance on some
1710*b7893ccfSSadaf Ebrahimi GPUs.
1711*b7893ccfSSadaf Ebrahimi
1712*b7893ccfSSadaf Ebrahimi You may set this flag only if you found out that the following device extensions are
1713*b7893ccfSSadaf Ebrahimi supported, you enabled them while creating the Vulkan device passed as
1714*b7893ccfSSadaf Ebrahimi VmaAllocatorCreateInfo::device, and you want them to be used internally by this
1715*b7893ccfSSadaf Ebrahimi library:
1716*b7893ccfSSadaf Ebrahimi
1717*b7893ccfSSadaf Ebrahimi - VK_KHR_get_memory_requirements2
1718*b7893ccfSSadaf Ebrahimi - VK_KHR_dedicated_allocation
1719*b7893ccfSSadaf Ebrahimi
1720*b7893ccfSSadaf Ebrahimi When this flag is set, you may see the following warnings reported by the Vulkan
1721*b7893ccfSSadaf Ebrahimi validation layer. You can safely ignore them.
1722*b7893ccfSSadaf Ebrahimi
1723*b7893ccfSSadaf Ebrahimi > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
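
A minimal sketch of enabling it (assuming `physicalDevice` and `device` are your Vulkan
handles and the device was created with both extensions enabled):

\code
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode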
1724*b7893ccfSSadaf Ebrahimi */
1725*b7893ccfSSadaf Ebrahimi VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1726*b7893ccfSSadaf Ebrahimi
1727*b7893ccfSSadaf Ebrahimi VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1728*b7893ccfSSadaf Ebrahimi } VmaAllocatorCreateFlagBits;
1729*b7893ccfSSadaf Ebrahimi typedef VkFlags VmaAllocatorCreateFlags;
1730*b7893ccfSSadaf Ebrahimi
1731*b7893ccfSSadaf Ebrahimi /** \brief Pointers to some Vulkan functions - a subset used by the library.
1732*b7893ccfSSadaf Ebrahimi
1733*b7893ccfSSadaf Ebrahimi Used in VmaAllocatorCreateInfo::pVulkanFunctions.
1734*b7893ccfSSadaf Ebrahimi */
1735*b7893ccfSSadaf Ebrahimi typedef struct VmaVulkanFunctions {
1736*b7893ccfSSadaf Ebrahimi PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1737*b7893ccfSSadaf Ebrahimi PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1738*b7893ccfSSadaf Ebrahimi PFN_vkAllocateMemory vkAllocateMemory;
1739*b7893ccfSSadaf Ebrahimi PFN_vkFreeMemory vkFreeMemory;
1740*b7893ccfSSadaf Ebrahimi PFN_vkMapMemory vkMapMemory;
1741*b7893ccfSSadaf Ebrahimi PFN_vkUnmapMemory vkUnmapMemory;
1742*b7893ccfSSadaf Ebrahimi PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1743*b7893ccfSSadaf Ebrahimi PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1744*b7893ccfSSadaf Ebrahimi PFN_vkBindBufferMemory vkBindBufferMemory;
1745*b7893ccfSSadaf Ebrahimi PFN_vkBindImageMemory vkBindImageMemory;
1746*b7893ccfSSadaf Ebrahimi PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1747*b7893ccfSSadaf Ebrahimi PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1748*b7893ccfSSadaf Ebrahimi PFN_vkCreateBuffer vkCreateBuffer;
1749*b7893ccfSSadaf Ebrahimi PFN_vkDestroyBuffer vkDestroyBuffer;
1750*b7893ccfSSadaf Ebrahimi PFN_vkCreateImage vkCreateImage;
1751*b7893ccfSSadaf Ebrahimi PFN_vkDestroyImage vkDestroyImage;
1752*b7893ccfSSadaf Ebrahimi PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1753*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
1754*b7893ccfSSadaf Ebrahimi PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1755*b7893ccfSSadaf Ebrahimi PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1756*b7893ccfSSadaf Ebrahimi #endif
1757*b7893ccfSSadaf Ebrahimi } VmaVulkanFunctions;
1758*b7893ccfSSadaf Ebrahimi
1759*b7893ccfSSadaf Ebrahimi /// Flags to be used in VmaRecordSettings::flags.
1760*b7893ccfSSadaf Ebrahimi typedef enum VmaRecordFlagBits {
1761*b7893ccfSSadaf Ebrahimi /** \brief Enables flush after recording every function call.
1762*b7893ccfSSadaf Ebrahimi
1763*b7893ccfSSadaf Ebrahimi Enable it if you expect your application to crash, which may otherwise leave the recording file truncated.
1764*b7893ccfSSadaf Ebrahimi It may degrade performance though.
1765*b7893ccfSSadaf Ebrahimi */
1766*b7893ccfSSadaf Ebrahimi VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1767*b7893ccfSSadaf Ebrahimi
1768*b7893ccfSSadaf Ebrahimi VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1769*b7893ccfSSadaf Ebrahimi } VmaRecordFlagBits;
1770*b7893ccfSSadaf Ebrahimi typedef VkFlags VmaRecordFlags;
1771*b7893ccfSSadaf Ebrahimi
1772*b7893ccfSSadaf Ebrahimi /// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
1773*b7893ccfSSadaf Ebrahimi typedef struct VmaRecordSettings
1774*b7893ccfSSadaf Ebrahimi {
1775*b7893ccfSSadaf Ebrahimi /// Flags for recording. Use #VmaRecordFlagBits enum.
1776*b7893ccfSSadaf Ebrahimi VmaRecordFlags flags;
1777*b7893ccfSSadaf Ebrahimi /** \brief Path to the file that should be written by the recording.
1778*b7893ccfSSadaf Ebrahimi
1779*b7893ccfSSadaf Ebrahimi Suggested extension: "csv".
1780*b7893ccfSSadaf Ebrahimi If the file already exists, it will be overwritten.
1781*b7893ccfSSadaf Ebrahimi It will be opened for the whole time #VmaAllocator object is alive.
1782*b7893ccfSSadaf Ebrahimi If opening this file fails, creation of the whole allocator object fails.
1783*b7893ccfSSadaf Ebrahimi */
1784*b7893ccfSSadaf Ebrahimi const char* pFilePath;
1785*b7893ccfSSadaf Ebrahimi } VmaRecordSettings;
1786*b7893ccfSSadaf Ebrahimi
1787*b7893ccfSSadaf Ebrahimi /// Description of an Allocator to be created.
1788*b7893ccfSSadaf Ebrahimi typedef struct VmaAllocatorCreateInfo
1789*b7893ccfSSadaf Ebrahimi {
1790*b7893ccfSSadaf Ebrahimi /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
1791*b7893ccfSSadaf Ebrahimi VmaAllocatorCreateFlags flags;
1792*b7893ccfSSadaf Ebrahimi /// Vulkan physical device.
1793*b7893ccfSSadaf Ebrahimi /** It must remain valid throughout the whole lifetime of the created allocator. */
1794*b7893ccfSSadaf Ebrahimi VkPhysicalDevice physicalDevice;
1795*b7893ccfSSadaf Ebrahimi /// Vulkan device.
1796*b7893ccfSSadaf Ebrahimi /** It must remain valid throughout the whole lifetime of the created allocator. */
1797*b7893ccfSSadaf Ebrahimi VkDevice device;
1798*b7893ccfSSadaf Ebrahimi /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
1799*b7893ccfSSadaf Ebrahimi /** Set to 0 to use default, which is currently 256 MiB. */
1800*b7893ccfSSadaf Ebrahimi VkDeviceSize preferredLargeHeapBlockSize;
1801*b7893ccfSSadaf Ebrahimi /// Custom CPU memory allocation callbacks. Optional.
1802*b7893ccfSSadaf Ebrahimi /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
1803*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* pAllocationCallbacks;
1804*b7893ccfSSadaf Ebrahimi /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
1805*b7893ccfSSadaf Ebrahimi /** Optional, can be null. */
1806*b7893ccfSSadaf Ebrahimi const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1807*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of additional frames that are in use at the same time as current frame.
1808*b7893ccfSSadaf Ebrahimi
1809*b7893ccfSSadaf Ebrahimi This value is used only when you make allocations with
1810*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
1811*b7893ccfSSadaf Ebrahimi lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
1812*b7893ccfSSadaf Ebrahimi
1813*b7893ccfSSadaf Ebrahimi For example, if you double-buffer your command buffers, so resources used for
1814*b7893ccfSSadaf Ebrahimi rendering in previous frame may still be in use by the GPU at the moment you
1815*b7893ccfSSadaf Ebrahimi allocate resources needed for the current frame, set this value to 1.
1816*b7893ccfSSadaf Ebrahimi
1817*b7893ccfSSadaf Ebrahimi If you want to allow any allocations other than used in the current frame to
1818*b7893ccfSSadaf Ebrahimi become lost, set this value to 0.
1819*b7893ccfSSadaf Ebrahimi */
1820*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount;
1821*b7893ccfSSadaf Ebrahimi /** \brief Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
1822*b7893ccfSSadaf Ebrahimi
1823*b7893ccfSSadaf Ebrahimi If not NULL, it must be a pointer to an array of
1824*b7893ccfSSadaf Ebrahimi `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining the limit on the
1825*b7893ccfSSadaf Ebrahimi maximum number of bytes that can be allocated out of a particular Vulkan memory
1826*b7893ccfSSadaf Ebrahimi heap.
1827*b7893ccfSSadaf Ebrahimi
1828*b7893ccfSSadaf Ebrahimi Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
1829*b7893ccfSSadaf Ebrahimi heap. This is also the default in case of `pHeapSizeLimit` = NULL.
1830*b7893ccfSSadaf Ebrahimi
1831*b7893ccfSSadaf Ebrahimi If there is a limit defined for a heap:
1832*b7893ccfSSadaf Ebrahimi
1833*b7893ccfSSadaf Ebrahimi - If the user tries to allocate more memory from that heap using this allocator,
1834*b7893ccfSSadaf Ebrahimi the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
1835*b7893ccfSSadaf Ebrahimi - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
1836*b7893ccfSSadaf Ebrahimi value of this limit will be reported instead when using vmaGetMemoryProperties().
1837*b7893ccfSSadaf Ebrahimi
1838*b7893ccfSSadaf Ebrahimi Warning! Using this feature may not be equivalent to installing a GPU with
1839*b7893ccfSSadaf Ebrahimi a smaller amount of memory, because the graphics driver doesn't necessarily fail new
1840*b7893ccfSSadaf Ebrahimi allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
1841*b7893ccfSSadaf Ebrahimi exceeded. It may return success and just silently migrate some device memory
1842*b7893ccfSSadaf Ebrahimi blocks to system RAM. This driver behavior can also be controlled using
1843*b7893ccfSSadaf Ebrahimi VK_AMD_memory_overallocation_behavior extension.
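
A minimal sketch of limiting a single heap (the heap index and the limit value are arbitrary
examples, not recommendations):

\code
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit by default.
heapSizeLimit[0] = 256ull * 1024 * 1024; // Example: allow at most 256 MiB from heap 0.

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill physicalDevice, device, etc. ...
allocatorInfo.pHeapSizeLimit = heapSizeLimit;
\endcode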
1844*b7893ccfSSadaf Ebrahimi */
1845*b7893ccfSSadaf Ebrahimi const VkDeviceSize* pHeapSizeLimit;
1846*b7893ccfSSadaf Ebrahimi /** \brief Pointers to Vulkan functions. Can be null if you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1.
1847*b7893ccfSSadaf Ebrahimi
1848*b7893ccfSSadaf Ebrahimi If you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1 in the configuration section,
1849*b7893ccfSSadaf Ebrahimi you can pass null as this member, because the library will fetch pointers to
1850*b7893ccfSSadaf Ebrahimi Vulkan functions internally in a static way, like:
1851*b7893ccfSSadaf Ebrahimi
1852*b7893ccfSSadaf Ebrahimi vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1853*b7893ccfSSadaf Ebrahimi
1854*b7893ccfSSadaf Ebrahimi Fill this member if you want to provide your own pointers to Vulkan functions,
1855*b7893ccfSSadaf Ebrahimi e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
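
For example, a sketch of fetching the pointers dynamically (assuming `instance` and `device`
are your Vulkan handles; only two members are shown, the rest are filled the same way):

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties =
    (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
vulkanFunctions.vkAllocateMemory =
    (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
// ... fill the remaining members of VmaVulkanFunctions the same way ...

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill physicalDevice, device, etc. ...
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
\endcode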
1856*b7893ccfSSadaf Ebrahimi */
1857*b7893ccfSSadaf Ebrahimi const VmaVulkanFunctions* pVulkanFunctions;
1858*b7893ccfSSadaf Ebrahimi /** \brief Parameters for recording of VMA calls. Can be null.
1859*b7893ccfSSadaf Ebrahimi
1860*b7893ccfSSadaf Ebrahimi If not null, it enables recording of calls to VMA functions to a file.
1861*b7893ccfSSadaf Ebrahimi If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
1862*b7893ccfSSadaf Ebrahimi creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
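
A minimal sketch (the file path is an arbitrary example):

\code
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
recordSettings.pFilePath = "VMA_calls.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill physicalDevice, device, etc. ...
allocatorInfo.pRecordSettings = &recordSettings;
\endcode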
1863*b7893ccfSSadaf Ebrahimi */
1864*b7893ccfSSadaf Ebrahimi const VmaRecordSettings* pRecordSettings;
1865*b7893ccfSSadaf Ebrahimi } VmaAllocatorCreateInfo;
1866*b7893ccfSSadaf Ebrahimi
1867*b7893ccfSSadaf Ebrahimi /// Creates Allocator object.
1868*b7893ccfSSadaf Ebrahimi VkResult vmaCreateAllocator(
1869*b7893ccfSSadaf Ebrahimi const VmaAllocatorCreateInfo* pCreateInfo,
1870*b7893ccfSSadaf Ebrahimi VmaAllocator* pAllocator);
1871*b7893ccfSSadaf Ebrahimi
1872*b7893ccfSSadaf Ebrahimi /// Destroys allocator object.
1873*b7893ccfSSadaf Ebrahimi void vmaDestroyAllocator(
1874*b7893ccfSSadaf Ebrahimi VmaAllocator allocator);
1875*b7893ccfSSadaf Ebrahimi
1876*b7893ccfSSadaf Ebrahimi /**
1877*b7893ccfSSadaf Ebrahimi PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
1878*b7893ccfSSadaf Ebrahimi You can access them here, without fetching them again on your own.
1879*b7893ccfSSadaf Ebrahimi */
1880*b7893ccfSSadaf Ebrahimi void vmaGetPhysicalDeviceProperties(
1881*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1882*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1883*b7893ccfSSadaf Ebrahimi
1884*b7893ccfSSadaf Ebrahimi /**
1885*b7893ccfSSadaf Ebrahimi PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
1886*b7893ccfSSadaf Ebrahimi You can access them here, without fetching them again on your own.
1887*b7893ccfSSadaf Ebrahimi */
1888*b7893ccfSSadaf Ebrahimi void vmaGetMemoryProperties(
1889*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1890*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1891*b7893ccfSSadaf Ebrahimi
1892*b7893ccfSSadaf Ebrahimi /**
1893*b7893ccfSSadaf Ebrahimi \brief Given Memory Type Index, returns Property Flags of this memory type.
1894*b7893ccfSSadaf Ebrahimi
1895*b7893ccfSSadaf Ebrahimi This is just a convenience function. Same information can be obtained using
1896*b7893ccfSSadaf Ebrahimi vmaGetMemoryProperties().
1897*b7893ccfSSadaf Ebrahimi */
1898*b7893ccfSSadaf Ebrahimi void vmaGetMemoryTypeProperties(
1899*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1900*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeIndex,
1901*b7893ccfSSadaf Ebrahimi VkMemoryPropertyFlags* pFlags);
1902*b7893ccfSSadaf Ebrahimi
1903*b7893ccfSSadaf Ebrahimi /** \brief Sets index of the current frame.
1904*b7893ccfSSadaf Ebrahimi
1905*b7893ccfSSadaf Ebrahimi This function must be used if you make allocations with
1906*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
1907*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator
1908*b7893ccfSSadaf Ebrahimi when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot
1909*b7893ccfSSadaf Ebrahimi become lost in the current frame.
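
For example (assuming `frameIndex` is a frame counter maintained by your application):

\code
// Called once at the beginning of every frame:
vmaSetCurrentFrameIndex(allocator, frameIndex);
++frameIndex;
\endcode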
1910*b7893ccfSSadaf Ebrahimi */
1911*b7893ccfSSadaf Ebrahimi void vmaSetCurrentFrameIndex(
1912*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1913*b7893ccfSSadaf Ebrahimi uint32_t frameIndex);
1914*b7893ccfSSadaf Ebrahimi
1915*b7893ccfSSadaf Ebrahimi /** \brief Calculated statistics of memory usage in the entire allocator.
1916*b7893ccfSSadaf Ebrahimi */
1917*b7893ccfSSadaf Ebrahimi typedef struct VmaStatInfo
1918*b7893ccfSSadaf Ebrahimi {
1919*b7893ccfSSadaf Ebrahimi /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
1920*b7893ccfSSadaf Ebrahimi uint32_t blockCount;
1921*b7893ccfSSadaf Ebrahimi /// Number of #VmaAllocation allocation objects allocated.
1922*b7893ccfSSadaf Ebrahimi uint32_t allocationCount;
1923*b7893ccfSSadaf Ebrahimi /// Number of free ranges of memory between allocations.
1924*b7893ccfSSadaf Ebrahimi uint32_t unusedRangeCount;
1925*b7893ccfSSadaf Ebrahimi /// Total number of bytes occupied by all allocations.
1926*b7893ccfSSadaf Ebrahimi VkDeviceSize usedBytes;
1927*b7893ccfSSadaf Ebrahimi /// Total number of bytes occupied by unused ranges.
1928*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedBytes;
1929*b7893ccfSSadaf Ebrahimi VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1930*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1931*b7893ccfSSadaf Ebrahimi } VmaStatInfo;
1932*b7893ccfSSadaf Ebrahimi
1933*b7893ccfSSadaf Ebrahimi /// General statistics from current state of Allocator.
1934*b7893ccfSSadaf Ebrahimi typedef struct VmaStats
1935*b7893ccfSSadaf Ebrahimi {
1936*b7893ccfSSadaf Ebrahimi VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1937*b7893ccfSSadaf Ebrahimi VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1938*b7893ccfSSadaf Ebrahimi VmaStatInfo total;
1939*b7893ccfSSadaf Ebrahimi } VmaStats;
1940*b7893ccfSSadaf Ebrahimi
1941*b7893ccfSSadaf Ebrahimi /// Retrieves statistics from current state of the Allocator.
1942*b7893ccfSSadaf Ebrahimi void vmaCalculateStats(
1943*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1944*b7893ccfSSadaf Ebrahimi VmaStats* pStats);
1945*b7893ccfSSadaf Ebrahimi
1946*b7893ccfSSadaf Ebrahimi #define VMA_STATS_STRING_ENABLED 1
1947*b7893ccfSSadaf Ebrahimi
1948*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
1949*b7893ccfSSadaf Ebrahimi
1950*b7893ccfSSadaf Ebrahimi /// Builds and returns statistics as a string in JSON format.
1951*b7893ccfSSadaf Ebrahimi /** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
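
A minimal usage sketch:

\code
char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
// Inspect statsString or write it to a file for later analysis...
vmaFreeStatsString(allocator, statsString);
\endcode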
1952*b7893ccfSSadaf Ebrahimi */
1953*b7893ccfSSadaf Ebrahimi void vmaBuildStatsString(
1954*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1955*b7893ccfSSadaf Ebrahimi char** ppStatsString,
1956*b7893ccfSSadaf Ebrahimi VkBool32 detailedMap);
1957*b7893ccfSSadaf Ebrahimi
1958*b7893ccfSSadaf Ebrahimi void vmaFreeStatsString(
1959*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
1960*b7893ccfSSadaf Ebrahimi char* pStatsString);
1961*b7893ccfSSadaf Ebrahimi
1962*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
1963*b7893ccfSSadaf Ebrahimi
1964*b7893ccfSSadaf Ebrahimi /** \struct VmaPool
1965*b7893ccfSSadaf Ebrahimi \brief Represents a custom memory pool.
1966*b7893ccfSSadaf Ebrahimi
1967*b7893ccfSSadaf Ebrahimi Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
1968*b7893ccfSSadaf Ebrahimi Call function vmaDestroyPool() to destroy it.
1969*b7893ccfSSadaf Ebrahimi
1970*b7893ccfSSadaf Ebrahimi For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
1971*b7893ccfSSadaf Ebrahimi */
1972*b7893ccfSSadaf Ebrahimi VK_DEFINE_HANDLE(VmaPool)
1973*b7893ccfSSadaf Ebrahimi
1974*b7893ccfSSadaf Ebrahimi typedef enum VmaMemoryUsage
1975*b7893ccfSSadaf Ebrahimi {
1976*b7893ccfSSadaf Ebrahimi /** No intended memory usage specified.
1977*b7893ccfSSadaf Ebrahimi Use other members of VmaAllocationCreateInfo to specify your requirements.
1978*b7893ccfSSadaf Ebrahimi */
1979*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_UNKNOWN = 0,
1980*b7893ccfSSadaf Ebrahimi /** Memory will be used on device only, so fast access from the device is preferred.
1981*b7893ccfSSadaf Ebrahimi It usually means device-local GPU (video) memory.
1982*b7893ccfSSadaf Ebrahimi No need to be mappable on host.
1983*b7893ccfSSadaf Ebrahimi It is roughly equivalent to `D3D12_HEAP_TYPE_DEFAULT`.
1984*b7893ccfSSadaf Ebrahimi
1985*b7893ccfSSadaf Ebrahimi Usage:
1986*b7893ccfSSadaf Ebrahimi
1987*b7893ccfSSadaf Ebrahimi - Resources written and read by device, e.g. images used as attachments.
1988*b7893ccfSSadaf Ebrahimi - Resources transferred from host once (immutable) or infrequently and read by
1989*b7893ccfSSadaf Ebrahimi device multiple times, e.g. textures to be sampled, vertex buffers, uniform
1990*b7893ccfSSadaf Ebrahimi (constant) buffers, and majority of other types of resources used on GPU.
1991*b7893ccfSSadaf Ebrahimi
1992*b7893ccfSSadaf Ebrahimi Allocation may still end up in `HOST_VISIBLE` memory on some implementations.
1993*b7893ccfSSadaf Ebrahimi In such case, you are free to map it.
1994*b7893ccfSSadaf Ebrahimi You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type.
1995*b7893ccfSSadaf Ebrahimi */
1996*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_GPU_ONLY = 1,
1997*b7893ccfSSadaf Ebrahimi /** Memory will be mappable on host.
1998*b7893ccfSSadaf Ebrahimi It usually means CPU (system) memory.
1999*b7893ccfSSadaf Ebrahimi Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`.
2000*b7893ccfSSadaf Ebrahimi CPU access is typically uncached. Writes may be write-combined.
2001*b7893ccfSSadaf Ebrahimi Resources created in this pool may still be accessible to the device, but access to them can be slow.
2002*b7893ccfSSadaf Ebrahimi It is roughly equivalent to `D3D12_HEAP_TYPE_UPLOAD`.
2003*b7893ccfSSadaf Ebrahimi
2004*b7893ccfSSadaf Ebrahimi Usage: Staging copy of resources used as transfer source.
2005*b7893ccfSSadaf Ebrahimi */
2006*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_CPU_ONLY = 2,
2007*b7893ccfSSadaf Ebrahimi /**
2008*b7893ccfSSadaf Ebrahimi Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU.
2009*b7893ccfSSadaf Ebrahimi CPU access is typically uncached. Writes may be write-combined.
2010*b7893ccfSSadaf Ebrahimi
2011*b7893ccfSSadaf Ebrahimi Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call.
2012*b7893ccfSSadaf Ebrahimi */
2013*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
2014*b7893ccfSSadaf Ebrahimi /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached.
2015*b7893ccfSSadaf Ebrahimi It is roughly equivalent to `D3D12_HEAP_TYPE_READBACK`.
2016*b7893ccfSSadaf Ebrahimi
2017*b7893ccfSSadaf Ebrahimi Usage:
2018*b7893ccfSSadaf Ebrahimi
2019*b7893ccfSSadaf Ebrahimi - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping.
2020*b7893ccfSSadaf Ebrahimi - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection.
2021*b7893ccfSSadaf Ebrahimi */
2022*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
2023*b7893ccfSSadaf Ebrahimi VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2024*b7893ccfSSadaf Ebrahimi } VmaMemoryUsage;
2025*b7893ccfSSadaf Ebrahimi
2026*b7893ccfSSadaf Ebrahimi /// Flags to be passed as VmaAllocationCreateInfo::flags.
2027*b7893ccfSSadaf Ebrahimi typedef enum VmaAllocationCreateFlagBits {
2028*b7893ccfSSadaf Ebrahimi /** \brief Set this flag if the allocation should have its own memory block.
2029*b7893ccfSSadaf Ebrahimi
2030*b7893ccfSSadaf Ebrahimi Use it for special, big resources, like fullscreen images used as attachments.
2031*b7893ccfSSadaf Ebrahimi
2032*b7893ccfSSadaf Ebrahimi This flag must also be used for host visible resources that you want to map
2033*b7893ccfSSadaf Ebrahimi simultaneously because otherwise they might end up as regions of the same
2034*b7893ccfSSadaf Ebrahimi `VkDeviceMemory`, while mapping the same `VkDeviceMemory` multiple times
2035*b7893ccfSSadaf Ebrahimi simultaneously is illegal.
2036*b7893ccfSSadaf Ebrahimi
2037*b7893ccfSSadaf Ebrahimi You should not use this flag if VmaAllocationCreateInfo::pool is not null.
2038*b7893ccfSSadaf Ebrahimi */
2039*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
2040*b7893ccfSSadaf Ebrahimi
2041*b7893ccfSSadaf Ebrahimi /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
2042*b7893ccfSSadaf Ebrahimi
2043*b7893ccfSSadaf Ebrahimi If a new allocation cannot be placed in any of the existing blocks, the allocation
2044*b7893ccfSSadaf Ebrahimi fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
2045*b7893ccfSSadaf Ebrahimi
2046*b7893ccfSSadaf Ebrahimi You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
2047*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
2048*b7893ccfSSadaf Ebrahimi
2049*b7893ccfSSadaf Ebrahimi If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
2050*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
2051*b7893ccfSSadaf Ebrahimi /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
2052*b7893ccfSSadaf Ebrahimi
2053*b7893ccfSSadaf Ebrahimi Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
2054*b7893ccfSSadaf Ebrahimi
2055*b7893ccfSSadaf Ebrahimi It is valid to use this flag for an allocation made from a memory type that is not
2056*b7893ccfSSadaf Ebrahimi `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
2057*b7893ccfSSadaf Ebrahimi useful if you need an allocation that is efficient to use on GPU
2058*b7893ccfSSadaf Ebrahimi (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
2059*b7893ccfSSadaf Ebrahimi support it (e.g. Intel GPU).
2060*b7893ccfSSadaf Ebrahimi
2061*b7893ccfSSadaf Ebrahimi You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
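
A minimal sketch of creating a persistently mapped buffer (`bufCreateInfo`, `myData` and
`myDataSize` are assumptions of this sketch; error handling omitted; `memcpy` requires `<cstring>`):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, &allocInfo);

// If the chosen memory type is HOST_VISIBLE, the memory stays mapped:
if(allocInfo.pMappedData != NULL)
    memcpy(allocInfo.pMappedData, myData, myDataSize);
\endcode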
2062*b7893ccfSSadaf Ebrahimi */
2063*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
2064*b7893ccfSSadaf Ebrahimi /** Allocation created with this flag can become lost as a result of another
2065*b7893ccfSSadaf Ebrahimi allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
2066*b7893ccfSSadaf Ebrahimi must check it before use.
2067*b7893ccfSSadaf Ebrahimi
2068*b7893ccfSSadaf Ebrahimi To check if allocation is not lost, call vmaGetAllocationInfo() and check if
2069*b7893ccfSSadaf Ebrahimi VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`.
2070*b7893ccfSSadaf Ebrahimi
2071*b7893ccfSSadaf Ebrahimi For details about supporting lost allocations, see Lost Allocations
2072*b7893ccfSSadaf Ebrahimi chapter of User Guide on Main Page.
2073*b7893ccfSSadaf Ebrahimi
2074*b7893ccfSSadaf Ebrahimi You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
2075*b7893ccfSSadaf Ebrahimi */
2076*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
2077*b7893ccfSSadaf Ebrahimi /** While creating allocation using this flag, other allocations that were
2078*b7893ccfSSadaf Ebrahimi created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
2079*b7893ccfSSadaf Ebrahimi
2080*b7893ccfSSadaf Ebrahimi For details about supporting lost allocations, see Lost Allocations
2081*b7893ccfSSadaf Ebrahimi chapter of User Guide on Main Page.
2082*b7893ccfSSadaf Ebrahimi */
2083*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
2084*b7893ccfSSadaf Ebrahimi /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
2085*b7893ccfSSadaf Ebrahimi null-terminated string. Instead of copying pointer value, a local copy of the
2086*b7893ccfSSadaf Ebrahimi string is made and stored in allocation's `pUserData`. The string is automatically
2087*b7893ccfSSadaf Ebrahimi freed together with the allocation. It is also used in vmaBuildStatsString().
2088*b7893ccfSSadaf Ebrahimi */
2089*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
2090*b7893ccfSSadaf Ebrahimi /** Allocation will be created from upper stack in a double stack pool.
2091*b7893ccfSSadaf Ebrahimi
2092*b7893ccfSSadaf Ebrahimi This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
2093*b7893ccfSSadaf Ebrahimi */
2094*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
2095*b7893ccfSSadaf Ebrahimi
2096*b7893ccfSSadaf Ebrahimi /** Allocation strategy that chooses smallest possible free range for the
2097*b7893ccfSSadaf Ebrahimi allocation.
2098*b7893ccfSSadaf Ebrahimi */
2099*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
2100*b7893ccfSSadaf Ebrahimi /** Allocation strategy that chooses biggest possible free range for the
2101*b7893ccfSSadaf Ebrahimi allocation.
2102*b7893ccfSSadaf Ebrahimi */
2103*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
2104*b7893ccfSSadaf Ebrahimi /** Allocation strategy that chooses first suitable free range for the
2105*b7893ccfSSadaf Ebrahimi allocation.
2106*b7893ccfSSadaf Ebrahimi
2107*b7893ccfSSadaf Ebrahimi "First" doesn't necessarily mean the one with the smallest offset in memory,
2108*b7893ccfSSadaf Ebrahimi but rather the one that is easiest and fastest to find.
2109*b7893ccfSSadaf Ebrahimi */
2110*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
2111*b7893ccfSSadaf Ebrahimi
2112*b7893ccfSSadaf Ebrahimi /** Allocation strategy that tries to minimize memory usage.
2113*b7893ccfSSadaf Ebrahimi */
2114*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
2115*b7893ccfSSadaf Ebrahimi /** Allocation strategy that tries to minimize allocation time.
2116*b7893ccfSSadaf Ebrahimi */
2117*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2118*b7893ccfSSadaf Ebrahimi /** Allocation strategy that tries to minimize memory fragmentation.
2119*b7893ccfSSadaf Ebrahimi */
2120*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
2121*b7893ccfSSadaf Ebrahimi
2122*b7893ccfSSadaf Ebrahimi /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
2123*b7893ccfSSadaf Ebrahimi */
2124*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_MASK =
2125*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
2126*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
2127*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2128*b7893ccfSSadaf Ebrahimi
2129*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2130*b7893ccfSSadaf Ebrahimi } VmaAllocationCreateFlagBits;
2131*b7893ccfSSadaf Ebrahimi typedef VkFlags VmaAllocationCreateFlags;
2132*b7893ccfSSadaf Ebrahimi
2133*b7893ccfSSadaf Ebrahimi typedef struct VmaAllocationCreateInfo
2134*b7893ccfSSadaf Ebrahimi {
2135*b7893ccfSSadaf Ebrahimi /// Use #VmaAllocationCreateFlagBits enum.
2136*b7893ccfSSadaf Ebrahimi VmaAllocationCreateFlags flags;
2137*b7893ccfSSadaf Ebrahimi /** \brief Intended usage of memory.
2138*b7893ccfSSadaf Ebrahimi
2139*b7893ccfSSadaf Ebrahimi You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
2140*b7893ccfSSadaf Ebrahimi If `pool` is not null, this member is ignored.
2141*b7893ccfSSadaf Ebrahimi */
2142*b7893ccfSSadaf Ebrahimi VmaMemoryUsage usage;
2143*b7893ccfSSadaf Ebrahimi /** \brief Flags that must be set in a Memory Type chosen for an allocation.
2144*b7893ccfSSadaf Ebrahimi
2145*b7893ccfSSadaf Ebrahimi Leave 0 if you specify memory requirements in other way. \n
2146*b7893ccfSSadaf Ebrahimi If `pool` is not null, this member is ignored.*/
2147*b7893ccfSSadaf Ebrahimi VkMemoryPropertyFlags requiredFlags;
2148*b7893ccfSSadaf Ebrahimi /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
2149*b7893ccfSSadaf Ebrahimi
2150*b7893ccfSSadaf Ebrahimi Set to 0 if no additional flags are preferred. \n
2151*b7893ccfSSadaf Ebrahimi If `pool` is not null, this member is ignored. */
2152*b7893ccfSSadaf Ebrahimi VkMemoryPropertyFlags preferredFlags;
2153*b7893ccfSSadaf Ebrahimi /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
2154*b7893ccfSSadaf Ebrahimi
2155*b7893ccfSSadaf Ebrahimi Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
2156*b7893ccfSSadaf Ebrahimi it meets other requirements specified by this structure, with no further
2157*b7893ccfSSadaf Ebrahimi restrictions on memory type index. \n
2158*b7893ccfSSadaf Ebrahimi If `pool` is not null, this member is ignored.
2159*b7893ccfSSadaf Ebrahimi */
2160*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeBits;
2161*b7893ccfSSadaf Ebrahimi /** \brief Pool that this allocation should be created in.
2162*b7893ccfSSadaf Ebrahimi
2163*b7893ccfSSadaf Ebrahimi Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
2164*b7893ccfSSadaf Ebrahimi `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
2165*b7893ccfSSadaf Ebrahimi */
2166*b7893ccfSSadaf Ebrahimi VmaPool pool;
2167*b7893ccfSSadaf Ebrahimi /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
2168*b7893ccfSSadaf Ebrahimi
2169*b7893ccfSSadaf Ebrahimi If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
2170*b7893ccfSSadaf Ebrahimi null or a pointer to a null-terminated string. The string will then be copied to an
2171*b7893ccfSSadaf Ebrahimi internal buffer, so it doesn't need to remain valid after the allocation call.
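
For example (the string content is arbitrary):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
// ... fill usage, flags, etc. ...
allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocCreateInfo.pUserData = (void*)"Texture: grass.png"; // Copied internally, may point to a temporary string.
\endcode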
2172*b7893ccfSSadaf Ebrahimi */
2173*b7893ccfSSadaf Ebrahimi void* pUserData;
2174*b7893ccfSSadaf Ebrahimi } VmaAllocationCreateInfo;
2175*b7893ccfSSadaf Ebrahimi
2176*b7893ccfSSadaf Ebrahimi /**
2177*b7893ccfSSadaf Ebrahimi \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
2178*b7893ccfSSadaf Ebrahimi
2179*b7893ccfSSadaf Ebrahimi This algorithm tries to find a memory type that:
2180*b7893ccfSSadaf Ebrahimi
2181*b7893ccfSSadaf Ebrahimi - Is allowed by memoryTypeBits.
2182*b7893ccfSSadaf Ebrahimi - Contains all the flags from pAllocationCreateInfo->requiredFlags.
2183*b7893ccfSSadaf Ebrahimi - Matches intended usage.
2184*b7893ccfSSadaf Ebrahimi - Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
2185*b7893ccfSSadaf Ebrahimi
2186*b7893ccfSSadaf Ebrahimi \return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
2187*b7893ccfSSadaf Ebrahimi from this function or any other allocating function probably means that your
2188*b7893ccfSSadaf Ebrahimi device doesn't support any memory type with requested features for the specific
2189*b7893ccfSSadaf Ebrahimi type of resource you want to use it for. Please check parameters of your
2190*b7893ccfSSadaf Ebrahimi resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
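
A minimal usage sketch (`memoryTypeBits` would typically come from
`VkMemoryRequirements::memoryTypeBits` of your resource, an assumption of this sketch):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
// On success, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
\endcode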
2191*b7893ccfSSadaf Ebrahimi */
2192*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndex(
2193*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2194*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeBits,
2195*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
2196*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex);
2197*b7893ccfSSadaf Ebrahimi
2198*b7893ccfSSadaf Ebrahimi /**
2199*b7893ccfSSadaf Ebrahimi \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
2200*b7893ccfSSadaf Ebrahimi
2201*b7893ccfSSadaf Ebrahimi It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
2202*b7893ccfSSadaf Ebrahimi It internally creates a temporary, dummy buffer that never has memory bound.
2203*b7893ccfSSadaf Ebrahimi It is just a convenience function, equivalent to calling:
2204*b7893ccfSSadaf Ebrahimi
2205*b7893ccfSSadaf Ebrahimi - `vkCreateBuffer`
2206*b7893ccfSSadaf Ebrahimi - `vkGetBufferMemoryRequirements`
2207*b7893ccfSSadaf Ebrahimi - `vmaFindMemoryTypeIndex`
2208*b7893ccfSSadaf Ebrahimi - `vkDestroyBuffer`
2209*b7893ccfSSadaf Ebrahimi */
2210*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndexForBufferInfo(
2211*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2212*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo* pBufferCreateInfo,
2213*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
2214*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex);
2215*b7893ccfSSadaf Ebrahimi
2216*b7893ccfSSadaf Ebrahimi /**
2217*b7893ccfSSadaf Ebrahimi \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
2218*b7893ccfSSadaf Ebrahimi
2219*b7893ccfSSadaf Ebrahimi It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
2220*b7893ccfSSadaf Ebrahimi It internally creates a temporary, dummy image that never has memory bound.
2221*b7893ccfSSadaf Ebrahimi It is just a convenience function, equivalent to calling:
2222*b7893ccfSSadaf Ebrahimi
2223*b7893ccfSSadaf Ebrahimi - `vkCreateImage`
2224*b7893ccfSSadaf Ebrahimi - `vkGetImageMemoryRequirements`
2225*b7893ccfSSadaf Ebrahimi - `vmaFindMemoryTypeIndex`
2226*b7893ccfSSadaf Ebrahimi - `vkDestroyImage`
2227*b7893ccfSSadaf Ebrahimi */
2228*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndexForImageInfo(
2229*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2230*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo* pImageCreateInfo,
2231*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
2232*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex);
2233*b7893ccfSSadaf Ebrahimi
2234*b7893ccfSSadaf Ebrahimi /// Flags to be passed as VmaPoolCreateInfo::flags.
2235*b7893ccfSSadaf Ebrahimi typedef enum VmaPoolCreateFlagBits {
2236*b7893ccfSSadaf Ebrahimi /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
2237*b7893ccfSSadaf Ebrahimi
2238*b7893ccfSSadaf Ebrahimi This is an optional optimization flag.
2239*b7893ccfSSadaf Ebrahimi
2240*b7893ccfSSadaf Ebrahimi If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
2241*b7893ccfSSadaf Ebrahimi vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
2242*b7893ccfSSadaf Ebrahimi knows the exact type of your allocations, so it can handle Buffer-Image Granularity
2243*b7893ccfSSadaf Ebrahimi in the optimal way.
2244*b7893ccfSSadaf Ebrahimi
2245*b7893ccfSSadaf Ebrahimi If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
2246*b7893ccfSSadaf Ebrahimi the exact type of such allocations is not known, so the allocator must be conservative
2247*b7893ccfSSadaf Ebrahimi in handling Buffer-Image Granularity, which can lead to suboptimal allocation
2248*b7893ccfSSadaf Ebrahimi (wasted memory). In that case, if you can make sure you always allocate only
2249*b7893ccfSSadaf Ebrahimi buffers and linear images or only optimal images out of this pool, use this flag
2250*b7893ccfSSadaf Ebrahimi to make allocator disregard Buffer-Image Granularity and so make allocations
2251*b7893ccfSSadaf Ebrahimi faster and more optimal.
2252*b7893ccfSSadaf Ebrahimi */
2253*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2254*b7893ccfSSadaf Ebrahimi
2255*b7893ccfSSadaf Ebrahimi /** \brief Enables alternative, linear allocation algorithm in this pool.
2256*b7893ccfSSadaf Ebrahimi
2257*b7893ccfSSadaf Ebrahimi Specify this flag to enable linear allocation algorithm, which always creates
2258*b7893ccfSSadaf Ebrahimi new allocations after the last one and doesn't reuse space from allocations freed in
2259*b7893ccfSSadaf Ebrahimi between. It trades memory consumption for simplified algorithm and data
2260*b7893ccfSSadaf Ebrahimi structure, which has better performance and uses less memory for metadata.
2261*b7893ccfSSadaf Ebrahimi
2262*b7893ccfSSadaf Ebrahimi By using this flag, you can achieve behavior of free-at-once, stack,
2263*b7893ccfSSadaf Ebrahimi ring buffer, and double stack. For details, see documentation chapter
2264*b7893ccfSSadaf Ebrahimi \ref linear_algorithm.
2265*b7893ccfSSadaf Ebrahimi
2266*b7893ccfSSadaf Ebrahimi When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default).
2267*b7893ccfSSadaf Ebrahimi
2268*b7893ccfSSadaf Ebrahimi For more details, see [Linear allocation algorithm](@ref linear_algorithm).
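
A minimal sketch (assuming `memTypeIndex` was chosen earlier, e.g. with vmaFindMemoryTypeIndex();
the block size is an arbitrary example):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Size of the single block.
poolCreateInfo.maxBlockCount = 1;               // Required by the linear algorithm.

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode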
2269*b7893ccfSSadaf Ebrahimi */
2270*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2271*b7893ccfSSadaf Ebrahimi
2272*b7893ccfSSadaf Ebrahimi /** \brief Enables alternative, buddy allocation algorithm in this pool.
2273*b7893ccfSSadaf Ebrahimi
2274*b7893ccfSSadaf Ebrahimi It operates on a tree of blocks, each having size that is a power of two and
2275*b7893ccfSSadaf Ebrahimi half of its parent's size. Compared to the default algorithm, this one provides
2276*b7893ccfSSadaf Ebrahimi faster allocation and deallocation and decreased external fragmentation,
2277*b7893ccfSSadaf Ebrahimi at the expense of more memory wasted (internal fragmentation).
2278*b7893ccfSSadaf Ebrahimi
2279*b7893ccfSSadaf Ebrahimi For more details, see [Buddy allocation algorithm](@ref buddy_algorithm).
2280*b7893ccfSSadaf Ebrahimi */
2281*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2282*b7893ccfSSadaf Ebrahimi
2283*b7893ccfSSadaf Ebrahimi /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
2284*b7893ccfSSadaf Ebrahimi */
2285*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_ALGORITHM_MASK =
2286*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
2287*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2288*b7893ccfSSadaf Ebrahimi
2289*b7893ccfSSadaf Ebrahimi VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2290*b7893ccfSSadaf Ebrahimi } VmaPoolCreateFlagBits;
2291*b7893ccfSSadaf Ebrahimi typedef VkFlags VmaPoolCreateFlags;
2292*b7893ccfSSadaf Ebrahimi
2293*b7893ccfSSadaf Ebrahimi /** \brief Describes parameter of created #VmaPool.
2294*b7893ccfSSadaf Ebrahimi */
2295*b7893ccfSSadaf Ebrahimi typedef struct VmaPoolCreateInfo {
2296*b7893ccfSSadaf Ebrahimi /** \brief Vulkan memory type index to allocate this pool from.
2297*b7893ccfSSadaf Ebrahimi */
2298*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeIndex;
2299*b7893ccfSSadaf Ebrahimi /** \brief Use combination of #VmaPoolCreateFlagBits.
2300*b7893ccfSSadaf Ebrahimi */
2301*b7893ccfSSadaf Ebrahimi VmaPoolCreateFlags flags;
2302*b7893ccfSSadaf Ebrahimi /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
2303*b7893ccfSSadaf Ebrahimi
2304*b7893ccfSSadaf Ebrahimi Specify nonzero to set explicit, constant size of memory blocks used by this
2305*b7893ccfSSadaf Ebrahimi pool.
2306*b7893ccfSSadaf Ebrahimi
2307*b7893ccfSSadaf Ebrahimi Leave 0 to use default and let the library manage block sizes automatically.
2308*b7893ccfSSadaf Ebrahimi Sizes of particular blocks may vary.
2309*b7893ccfSSadaf Ebrahimi */
2310*b7893ccfSSadaf Ebrahimi VkDeviceSize blockSize;
2311*b7893ccfSSadaf Ebrahimi /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
2312*b7893ccfSSadaf Ebrahimi
2313*b7893ccfSSadaf Ebrahimi Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
2314*b7893ccfSSadaf Ebrahimi */
2315*b7893ccfSSadaf Ebrahimi size_t minBlockCount;
2316*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
2317*b7893ccfSSadaf Ebrahimi
2318*b7893ccfSSadaf Ebrahimi Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
2319*b7893ccfSSadaf Ebrahimi
2320*b7893ccfSSadaf Ebrahimi Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
2321*b7893ccfSSadaf Ebrahimi throughout the whole lifetime of this pool.
2322*b7893ccfSSadaf Ebrahimi */
2323*b7893ccfSSadaf Ebrahimi size_t maxBlockCount;
2324*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of additional frames that are in use at the same time as current frame.
2325*b7893ccfSSadaf Ebrahimi
2326*b7893ccfSSadaf Ebrahimi This value is used only when you make allocations with
2327*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
2328*b7893ccfSSadaf Ebrahimi lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
2329*b7893ccfSSadaf Ebrahimi
2330*b7893ccfSSadaf Ebrahimi For example, if you double-buffer your command buffers, so resources used for
2331*b7893ccfSSadaf Ebrahimi rendering in previous frame may still be in use by the GPU at the moment you
2332*b7893ccfSSadaf Ebrahimi allocate resources needed for the current frame, set this value to 1.
2333*b7893ccfSSadaf Ebrahimi
2334*b7893ccfSSadaf Ebrahimi If you want to allow any allocations other than used in the current frame to
2335*b7893ccfSSadaf Ebrahimi become lost, set this value to 0.
2336*b7893ccfSSadaf Ebrahimi */
2337*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount;
2338*b7893ccfSSadaf Ebrahimi } VmaPoolCreateInfo;
2339*b7893ccfSSadaf Ebrahimi
2340*b7893ccfSSadaf Ebrahimi /** \brief Describes parameter of existing #VmaPool.
2341*b7893ccfSSadaf Ebrahimi */
2342*b7893ccfSSadaf Ebrahimi typedef struct VmaPoolStats {
2343*b7893ccfSSadaf Ebrahimi /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
2344*b7893ccfSSadaf Ebrahimi */
2345*b7893ccfSSadaf Ebrahimi VkDeviceSize size;
2346*b7893ccfSSadaf Ebrahimi /** \brief Total number of bytes in the pool not used by any #VmaAllocation.
2347*b7893ccfSSadaf Ebrahimi */
2348*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedSize;
2349*b7893ccfSSadaf Ebrahimi /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
2350*b7893ccfSSadaf Ebrahimi */
2351*b7893ccfSSadaf Ebrahimi size_t allocationCount;
2352*b7893ccfSSadaf Ebrahimi /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation.
2353*b7893ccfSSadaf Ebrahimi */
2354*b7893ccfSSadaf Ebrahimi size_t unusedRangeCount;
2355*b7893ccfSSadaf Ebrahimi /** \brief Size of the largest continuous free memory region available for new allocation.
2356*b7893ccfSSadaf Ebrahimi
2357*b7893ccfSSadaf Ebrahimi Making a new allocation of that size is not guaranteed to succeed because of
2358*b7893ccfSSadaf Ebrahimi possible additional margin required to respect alignment and buffer/image
2359*b7893ccfSSadaf Ebrahimi granularity.
2360*b7893ccfSSadaf Ebrahimi */
2361*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedRangeSizeMax;
2362*b7893ccfSSadaf Ebrahimi /** \brief Number of `VkDeviceMemory` blocks allocated for this pool.
2363*b7893ccfSSadaf Ebrahimi */
2364*b7893ccfSSadaf Ebrahimi size_t blockCount;
2365*b7893ccfSSadaf Ebrahimi } VmaPoolStats;
2366*b7893ccfSSadaf Ebrahimi
2367*b7893ccfSSadaf Ebrahimi /** \brief Allocates Vulkan device memory and creates #VmaPool object.
2368*b7893ccfSSadaf Ebrahimi
2369*b7893ccfSSadaf Ebrahimi @param allocator Allocator object.
2370*b7893ccfSSadaf Ebrahimi @param pCreateInfo Parameters of pool to create.
2371*b7893ccfSSadaf Ebrahimi @param[out] pPool Handle to created pool.
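
A minimal sketch (assuming `memTypeIndex` was chosen earlier, e.g. with
vmaFindMemoryTypeIndexForBufferInfo(); the block size is an arbitrary example; error handling omitted):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Fixed 128 MiB blocks.
poolCreateInfo.minBlockCount = 1;                // Keep at least one block allocated.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

// ... create allocations with VmaAllocationCreateInfo::pool = pool ...

vmaDestroyPool(allocator, pool);
\endcode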
2372*b7893ccfSSadaf Ebrahimi */
2373*b7893ccfSSadaf Ebrahimi VkResult vmaCreatePool(
2374*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2375*b7893ccfSSadaf Ebrahimi const VmaPoolCreateInfo* pCreateInfo,
2376*b7893ccfSSadaf Ebrahimi VmaPool* pPool);
2377*b7893ccfSSadaf Ebrahimi
2378*b7893ccfSSadaf Ebrahimi /** \brief Destroys #VmaPool object and frees Vulkan device memory.
2379*b7893ccfSSadaf Ebrahimi */
2380*b7893ccfSSadaf Ebrahimi void vmaDestroyPool(
2381*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2382*b7893ccfSSadaf Ebrahimi VmaPool pool);
2383*b7893ccfSSadaf Ebrahimi
2384*b7893ccfSSadaf Ebrahimi /** \brief Retrieves statistics of existing #VmaPool object.
2385*b7893ccfSSadaf Ebrahimi
2386*b7893ccfSSadaf Ebrahimi @param allocator Allocator object.
2387*b7893ccfSSadaf Ebrahimi @param pool Pool object.
2388*b7893ccfSSadaf Ebrahimi @param[out] pPoolStats Statistics of specified pool.
2389*b7893ccfSSadaf Ebrahimi */
2390*b7893ccfSSadaf Ebrahimi void vmaGetPoolStats(
2391*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2392*b7893ccfSSadaf Ebrahimi VmaPool pool,
2393*b7893ccfSSadaf Ebrahimi VmaPoolStats* pPoolStats);
2394*b7893ccfSSadaf Ebrahimi
2395*b7893ccfSSadaf Ebrahimi /** \brief Marks all allocations in the given pool as lost if they are not used in the current frame or within VmaPoolCreateInfo::frameInUseCount frames back from now.
2396*b7893ccfSSadaf Ebrahimi
2397*b7893ccfSSadaf Ebrahimi @param allocator Allocator object.
2398*b7893ccfSSadaf Ebrahimi @param pool Pool.
2399*b7893ccfSSadaf Ebrahimi @param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
2400*b7893ccfSSadaf Ebrahimi */
2401*b7893ccfSSadaf Ebrahimi void vmaMakePoolAllocationsLost(
2402*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2403*b7893ccfSSadaf Ebrahimi VmaPool pool,
2404*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount);
2405*b7893ccfSSadaf Ebrahimi
2406*b7893ccfSSadaf Ebrahimi /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
2407*b7893ccfSSadaf Ebrahimi
2408*b7893ccfSSadaf Ebrahimi Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
2409*b7893ccfSSadaf Ebrahimi `VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
2410*b7893ccfSSadaf Ebrahimi `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2411*b7893ccfSSadaf Ebrahimi
2412*b7893ccfSSadaf Ebrahimi Possible return values:
2413*b7893ccfSSadaf Ebrahimi
2414*b7893ccfSSadaf Ebrahimi - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
2415*b7893ccfSSadaf Ebrahimi - `VK_SUCCESS` - corruption detection has been performed and succeeded.
2416*b7893ccfSSadaf Ebrahimi - `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
2417*b7893ccfSSadaf Ebrahimi `VMA_ASSERT` is also fired in that case.
2418*b7893ccfSSadaf Ebrahimi - Other value: Error returned by Vulkan, e.g. memory mapping failure.
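
A possible way to use it (a sketch; the corruption-detection macros described above must be enabled at compile time for the check to actually run):

\code
VkResult res = vmaCheckPoolCorruption(allocator, pool);
if(res == VK_ERROR_VALIDATION_FAILED_EXT)
{
    // Memory corruption detected around one of the allocations in this pool.
}
else if(res != VK_SUCCESS && res != VK_ERROR_FEATURE_NOT_PRESENT)
{
    // Some other Vulkan error, e.g. a memory mapping failure.
}
\endcode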
2419*b7893ccfSSadaf Ebrahimi */
2420*b7893ccfSSadaf Ebrahimi VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2421*b7893ccfSSadaf Ebrahimi
2422*b7893ccfSSadaf Ebrahimi /** \struct VmaAllocation
2423*b7893ccfSSadaf Ebrahimi \brief Represents single memory allocation.
2424*b7893ccfSSadaf Ebrahimi
2425*b7893ccfSSadaf Ebrahimi It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
2426*b7893ccfSSadaf Ebrahimi plus a unique offset.
2427*b7893ccfSSadaf Ebrahimi
2428*b7893ccfSSadaf Ebrahimi There are multiple ways to create such an object.
2429*b7893ccfSSadaf Ebrahimi You need to fill the structure VmaAllocationCreateInfo.
2430*b7893ccfSSadaf Ebrahimi For more information see [Choosing memory type](@ref choosing_memory_type).
2431*b7893ccfSSadaf Ebrahimi
2432*b7893ccfSSadaf Ebrahimi Although the library provides convenience functions that create Vulkan buffer or image,
2433*b7893ccfSSadaf Ebrahimi allocate memory for it and bind them together,
2434*b7893ccfSSadaf Ebrahimi binding of the allocation to a buffer or an image is out of scope of the allocation itself.
2435*b7893ccfSSadaf Ebrahimi An allocation object can exist without a buffer/image bound to it.
2436*b7893ccfSSadaf Ebrahimi Binding can be done manually by the user, and the buffer/image can be destroyed
2437*b7893ccfSSadaf Ebrahimi independently of destruction of the allocation.
2438*b7893ccfSSadaf Ebrahimi
2439*b7893ccfSSadaf Ebrahimi The object also remembers its size and some other information.
2440*b7893ccfSSadaf Ebrahimi To retrieve this information, use function vmaGetAllocationInfo() and inspect
2441*b7893ccfSSadaf Ebrahimi returned structure VmaAllocationInfo.
2442*b7893ccfSSadaf Ebrahimi
2443*b7893ccfSSadaf Ebrahimi Some kinds of allocations can be in lost state.
2444*b7893ccfSSadaf Ebrahimi For more information, see [Lost allocations](@ref lost_allocations).
2445*b7893ccfSSadaf Ebrahimi */
2446*b7893ccfSSadaf Ebrahimi VK_DEFINE_HANDLE(VmaAllocation)
2447*b7893ccfSSadaf Ebrahimi
2448*b7893ccfSSadaf Ebrahimi /** \brief Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
2449*b7893ccfSSadaf Ebrahimi */
2450*b7893ccfSSadaf Ebrahimi typedef struct VmaAllocationInfo {
2451*b7893ccfSSadaf Ebrahimi /** \brief Memory type index that this allocation was allocated from.
2452*b7893ccfSSadaf Ebrahimi
2453*b7893ccfSSadaf Ebrahimi It never changes.
2454*b7893ccfSSadaf Ebrahimi */
2455*b7893ccfSSadaf Ebrahimi uint32_t memoryType;
2456*b7893ccfSSadaf Ebrahimi /** \brief Handle to Vulkan memory object.
2457*b7893ccfSSadaf Ebrahimi
2458*b7893ccfSSadaf Ebrahimi Same memory object can be shared by multiple allocations.
2459*b7893ccfSSadaf Ebrahimi
2460*b7893ccfSSadaf Ebrahimi It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
2461*b7893ccfSSadaf Ebrahimi
2462*b7893ccfSSadaf Ebrahimi If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
2463*b7893ccfSSadaf Ebrahimi */
2464*b7893ccfSSadaf Ebrahimi VkDeviceMemory deviceMemory;
2465*b7893ccfSSadaf Ebrahimi /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
2466*b7893ccfSSadaf Ebrahimi
2467*b7893ccfSSadaf Ebrahimi It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
2468*b7893ccfSSadaf Ebrahimi */
2469*b7893ccfSSadaf Ebrahimi VkDeviceSize offset;
2470*b7893ccfSSadaf Ebrahimi /** \brief Size of this allocation, in bytes.
2471*b7893ccfSSadaf Ebrahimi
2472*b7893ccfSSadaf Ebrahimi It never changes, unless allocation is lost.
2473*b7893ccfSSadaf Ebrahimi */
2474*b7893ccfSSadaf Ebrahimi VkDeviceSize size;
2475*b7893ccfSSadaf Ebrahimi /** \brief Pointer to the beginning of this allocation as mapped data.
2476*b7893ccfSSadaf Ebrahimi
2477*b7893ccfSSadaf Ebrahimi If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
2478*b7893ccfSSadaf Ebrahimi created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
2479*b7893ccfSSadaf Ebrahimi
2480*b7893ccfSSadaf Ebrahimi It can change after call to vmaMapMemory(), vmaUnmapMemory().
2481*b7893ccfSSadaf Ebrahimi It can also change after call to vmaDefragment() if this allocation is passed to the function.
2482*b7893ccfSSadaf Ebrahimi */
2483*b7893ccfSSadaf Ebrahimi void* pMappedData;
2484*b7893ccfSSadaf Ebrahimi /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
2485*b7893ccfSSadaf Ebrahimi
2486*b7893ccfSSadaf Ebrahimi It can change after call to vmaSetAllocationUserData() for this allocation.
2487*b7893ccfSSadaf Ebrahimi */
2488*b7893ccfSSadaf Ebrahimi void* pUserData;
2489*b7893ccfSSadaf Ebrahimi } VmaAllocationInfo;
2490*b7893ccfSSadaf Ebrahimi
2491*b7893ccfSSadaf Ebrahimi /** \brief General purpose memory allocation.
2492*b7893ccfSSadaf Ebrahimi
2493*b7893ccfSSadaf Ebrahimi @param[out] pAllocation Handle to allocated memory.
2494*b7893ccfSSadaf Ebrahimi @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2495*b7893ccfSSadaf Ebrahimi
2496*b7893ccfSSadaf Ebrahimi You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
2497*b7893ccfSSadaf Ebrahimi
2498*b7893ccfSSadaf Ebrahimi It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
2499*b7893ccfSSadaf Ebrahimi vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
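
Example of a raw allocation not tied to any buffer or image (a sketch, assuming a valid `allocator`; the requirements below are placeholders - in real code take them from `vkGet*MemoryRequirements()`):

\code
VkMemoryRequirements memReq = {};
memReq.size = 65536;
memReq.alignment = 256;
memReq.memoryTypeBits = UINT32_MAX; // Any memory type acceptable here (placeholder).

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, NULL);
if(res == VK_SUCCESS)
{
    // ... use the memory ...
    vmaFreeMemory(allocator, allocation);
}
\endcode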
2500*b7893ccfSSadaf Ebrahimi */
2501*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemory(
2502*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2503*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements* pVkMemoryRequirements,
2504*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
2505*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
2506*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
2507*b7893ccfSSadaf Ebrahimi
2508*b7893ccfSSadaf Ebrahimi /** \brief General purpose memory allocation for multiple allocation objects at once.
2509*b7893ccfSSadaf Ebrahimi
2510*b7893ccfSSadaf Ebrahimi @param allocator Allocator object.
2511*b7893ccfSSadaf Ebrahimi @param pVkMemoryRequirements Memory requirements for each allocation.
2512*b7893ccfSSadaf Ebrahimi @param pCreateInfo Creation parameters for each allocation.
2513*b7893ccfSSadaf Ebrahimi @param allocationCount Number of allocations to make.
2514*b7893ccfSSadaf Ebrahimi @param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
2515*b7893ccfSSadaf Ebrahimi @param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
2516*b7893ccfSSadaf Ebrahimi
2517*b7893ccfSSadaf Ebrahimi You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
2518*b7893ccfSSadaf Ebrahimi
2519*b7893ccfSSadaf Ebrahimi Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
2520*b7893ccfSSadaf Ebrahimi It is just a general purpose allocation function able to make multiple allocations at once.
2521*b7893ccfSSadaf Ebrahimi It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
2522*b7893ccfSSadaf Ebrahimi
2523*b7893ccfSSadaf Ebrahimi All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
2524*b7893ccfSSadaf Ebrahimi If any allocation fails, all allocations already made within this function call are also freed, so that when the
2525*b7893ccfSSadaf Ebrahimi returned result is not `VK_SUCCESS`, the `pAllocations` array is entirely filled with `VK_NULL_HANDLE`.
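
Example (a sketch; the requirements are placeholders and `allocator` is assumed to be valid):

\code
// Allocate 8 pieces of 64 KiB each with identical parameters.
VkMemoryRequirements memReq = {};
memReq.size = 65536;
memReq.alignment = 65536;
memReq.memoryTypeBits = UINT32_MAX; // Placeholder - use real requirements.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocations[8];
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo, 8, allocations, NULL);
if(res == VK_SUCCESS)
{
    // ... e.g. bind the pieces to a sparse resource ...
    vmaFreeMemoryPages(allocator, 8, allocations);
}
\endcode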
2526*b7893ccfSSadaf Ebrahimi */
2527*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryPages(
2528*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2529*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements* pVkMemoryRequirements,
2530*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
2531*b7893ccfSSadaf Ebrahimi size_t allocationCount,
2532*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
2533*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
2534*b7893ccfSSadaf Ebrahimi
2535*b7893ccfSSadaf Ebrahimi /**
2536*b7893ccfSSadaf Ebrahimi @param[out] pAllocation Handle to allocated memory.
2537*b7893ccfSSadaf Ebrahimi @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2538*b7893ccfSSadaf Ebrahimi
2539*b7893ccfSSadaf Ebrahimi You should free the memory using vmaFreeMemory().
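
Typical manual flow (a sketch, assuming valid `allocator` and `device`): create the buffer yourself, allocate memory for it with this function, then bind the two with vmaBindBufferMemory(). The convenience function vmaCreateBuffer() does all of this in one call.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VkBuffer buffer;
vkCreateBuffer(device, &bufCreateInfo, NULL, &buffer);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, NULL);
vmaBindBufferMemory(allocator, allocation, buffer);
// Later: vkDestroyBuffer(device, buffer, NULL); vmaFreeMemory(allocator, allocation);
\endcode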
2540*b7893ccfSSadaf Ebrahimi */
2541*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryForBuffer(
2542*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2543*b7893ccfSSadaf Ebrahimi VkBuffer buffer,
2544*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
2545*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
2546*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
2547*b7893ccfSSadaf Ebrahimi
2548*b7893ccfSSadaf Ebrahimi /// Function similar to vmaAllocateMemoryForBuffer().
2549*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryForImage(
2550*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2551*b7893ccfSSadaf Ebrahimi VkImage image,
2552*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
2553*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
2554*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
2555*b7893ccfSSadaf Ebrahimi
2556*b7893ccfSSadaf Ebrahimi /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
2557*b7893ccfSSadaf Ebrahimi
2558*b7893ccfSSadaf Ebrahimi Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
2559*b7893ccfSSadaf Ebrahimi */
2560*b7893ccfSSadaf Ebrahimi void vmaFreeMemory(
2561*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2562*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
2563*b7893ccfSSadaf Ebrahimi
2564*b7893ccfSSadaf Ebrahimi /** \brief Frees memory and destroys multiple allocations.
2565*b7893ccfSSadaf Ebrahimi
2566*b7893ccfSSadaf Ebrahimi Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
2567*b7893ccfSSadaf Ebrahimi It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
2568*b7893ccfSSadaf Ebrahimi vmaAllocateMemoryPages() and other functions.
2569*b7893ccfSSadaf Ebrahimi It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
2570*b7893ccfSSadaf Ebrahimi
2571*b7893ccfSSadaf Ebrahimi Allocations in `pAllocations` array can come from any memory pools and types.
2572*b7893ccfSSadaf Ebrahimi Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
2573*b7893ccfSSadaf Ebrahimi */
2574*b7893ccfSSadaf Ebrahimi void vmaFreeMemoryPages(
2575*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2576*b7893ccfSSadaf Ebrahimi size_t allocationCount,
2577*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations);
2578*b7893ccfSSadaf Ebrahimi
2579*b7893ccfSSadaf Ebrahimi /** \brief Tries to resize an allocation in place, if there is enough free memory after it.
2580*b7893ccfSSadaf Ebrahimi
2581*b7893ccfSSadaf Ebrahimi Tries to change allocation's size without moving or reallocating it.
2582*b7893ccfSSadaf Ebrahimi You can both shrink and grow allocation size.
2583*b7893ccfSSadaf Ebrahimi When growing, it succeeds only when the allocation belongs to a memory block with enough
2584*b7893ccfSSadaf Ebrahimi free space after it.
2585*b7893ccfSSadaf Ebrahimi
2586*b7893ccfSSadaf Ebrahimi Returns `VK_SUCCESS` if allocation's size has been successfully changed.
2587*b7893ccfSSadaf Ebrahimi Returns `VK_ERROR_OUT_OF_POOL_MEMORY` if allocation's size could not be changed.
2588*b7893ccfSSadaf Ebrahimi
2589*b7893ccfSSadaf Ebrahimi After successful call to this function, VmaAllocationInfo::size of this allocation changes.
2590*b7893ccfSSadaf Ebrahimi All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer.
2591*b7893ccfSSadaf Ebrahimi
2592*b7893ccfSSadaf Ebrahimi - Calling this function on allocation that is in lost state fails with result `VK_ERROR_VALIDATION_FAILED_EXT`.
2593*b7893ccfSSadaf Ebrahimi - Calling this function with `newSize` same as current allocation size does nothing and returns `VK_SUCCESS`.
2594*b7893ccfSSadaf Ebrahimi - Resizing dedicated allocations, as well as allocations created in pools that use linear
2595*b7893ccfSSadaf Ebrahimi or buddy algorithm, is not supported.
2596*b7893ccfSSadaf Ebrahimi The function returns `VK_ERROR_FEATURE_NOT_PRESENT` in such cases.
2597*b7893ccfSSadaf Ebrahimi Support may be added in the future.
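
Example of an in-place grow attempt with a fallback (a sketch; `allocation` and `newSize` are assumed to exist):

\code
VkResult res = vmaResizeAllocation(allocator, allocation, newSize);
if(res != VK_SUCCESS)
{
    // VK_ERROR_OUT_OF_POOL_MEMORY or VK_ERROR_FEATURE_NOT_PRESENT:
    // fall back to creating a new, bigger allocation and copying the data.
}
\endcode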
2598*b7893ccfSSadaf Ebrahimi */
2599*b7893ccfSSadaf Ebrahimi VkResult vmaResizeAllocation(
2600*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2601*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
2602*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize);
2603*b7893ccfSSadaf Ebrahimi
2604*b7893ccfSSadaf Ebrahimi /** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
2605*b7893ccfSSadaf Ebrahimi
2606*b7893ccfSSadaf Ebrahimi Current parameters of the given allocation are returned in `pAllocationInfo`.
2607*b7893ccfSSadaf Ebrahimi
2608*b7893ccfSSadaf Ebrahimi This function also atomically "touches" allocation - marks it as used in current frame,
2609*b7893ccfSSadaf Ebrahimi just like vmaTouchAllocation().
2610*b7893ccfSSadaf Ebrahimi If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
2611*b7893ccfSSadaf Ebrahimi
2612*b7893ccfSSadaf Ebrahimi This function uses atomics and doesn't lock any mutex, so it should be quite efficient.
2613*b7893ccfSSadaf Ebrahimi Still, you can avoid calling it too often:
2614*b7893ccfSSadaf Ebrahimi
2615*b7893ccfSSadaf Ebrahimi - You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
2616*b7893ccfSSadaf Ebrahimi vmaCreateBuffer() and vmaCreateImage(). You can remember it if you are sure its parameters don't change
2617*b7893ccfSSadaf Ebrahimi (e.g. due to defragmentation or the allocation becoming lost).
2618*b7893ccfSSadaf Ebrahimi - If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
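
Example (a sketch, assuming a valid `allocation`):

\code
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
if(allocInfo.deviceMemory == VK_NULL_HANDLE)
{
    // The allocation is lost - recreate the resource and its allocation.
}
else
{
    // allocInfo.deviceMemory + allocInfo.offset identify the memory region;
    // allocInfo.pMappedData is non-null if the memory is persistently mapped.
}
\endcode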
2619*b7893ccfSSadaf Ebrahimi */
2620*b7893ccfSSadaf Ebrahimi void vmaGetAllocationInfo(
2621*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2622*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
2623*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
2624*b7893ccfSSadaf Ebrahimi
2625*b7893ccfSSadaf Ebrahimi /** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
2626*b7893ccfSSadaf Ebrahimi
2627*b7893ccfSSadaf Ebrahimi If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
2628*b7893ccfSSadaf Ebrahimi this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
2629*b7893ccfSSadaf Ebrahimi It then also atomically "touches" the allocation - marks it as used in current frame,
2630*b7893ccfSSadaf Ebrahimi so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
2631*b7893ccfSSadaf Ebrahimi
2632*b7893ccfSSadaf Ebrahimi If the allocation is in lost state, the function returns `VK_FALSE`.
2633*b7893ccfSSadaf Ebrahimi Memory of such allocation, as well as buffer or image bound to it, should not be used.
2634*b7893ccfSSadaf Ebrahimi Lost allocation and the buffer/image still need to be destroyed.
2635*b7893ccfSSadaf Ebrahimi
2636*b7893ccfSSadaf Ebrahimi If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
2637*b7893ccfSSadaf Ebrahimi this function always returns `VK_TRUE`.
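
Typical per-frame check for an allocation created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (a sketch):

\code
if(vmaTouchAllocation(allocator, allocation) == VK_TRUE)
{
    // Safe to use in the current frame and the next frameInUseCount frames.
}
else
{
    // Lost: destroy the old buffer/image and its allocation, then recreate them.
}
\endcode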
2638*b7893ccfSSadaf Ebrahimi */
2639*b7893ccfSSadaf Ebrahimi VkBool32 vmaTouchAllocation(
2640*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2641*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
2642*b7893ccfSSadaf Ebrahimi
2643*b7893ccfSSadaf Ebrahimi /** \brief Sets pUserData in given allocation to new value.
2644*b7893ccfSSadaf Ebrahimi
2645*b7893ccfSSadaf Ebrahimi If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
2646*b7893ccfSSadaf Ebrahimi pUserData must be either null or a pointer to a null-terminated string. The function
2647*b7893ccfSSadaf Ebrahimi makes a local copy of the string and sets it as the allocation's `pUserData`. The string
2648*b7893ccfSSadaf Ebrahimi passed as pUserData doesn't need to stay valid for the whole lifetime of the allocation -
2649*b7893ccfSSadaf Ebrahimi you can free it after this call. The string previously pointed to by the allocation's
2650*b7893ccfSSadaf Ebrahimi pUserData is freed from memory.
2651*b7893ccfSSadaf Ebrahimi
2652*b7893ccfSSadaf Ebrahimi If the flag was not used, the value of the pointer `pUserData` is just copied to the
2653*b7893ccfSSadaf Ebrahimi allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
2654*b7893ccfSSadaf Ebrahimi as a pointer, an ordinal number or some handle to your own data.
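
Example of attaching a name when the allocation was created with #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT (a sketch; `textureIndex` is a placeholder):

\code
// The string is copied internally, so a temporary buffer is fine here.
char name[64];
snprintf(name, sizeof(name), "Texture %u", textureIndex);
vmaSetAllocationUserData(allocator, allocation, name);

// Without the flag, any opaque pointer or handle can be stored instead:
// vmaSetAllocationUserData(allocator, allocation, (void*)(uintptr_t)myId);
\endcode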
2655*b7893ccfSSadaf Ebrahimi */
2656*b7893ccfSSadaf Ebrahimi void vmaSetAllocationUserData(
2657*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2658*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
2659*b7893ccfSSadaf Ebrahimi void* pUserData);
2660*b7893ccfSSadaf Ebrahimi
2661*b7893ccfSSadaf Ebrahimi /** \brief Creates new allocation that is in lost state from the beginning.
2662*b7893ccfSSadaf Ebrahimi
2663*b7893ccfSSadaf Ebrahimi It can be useful if you need a dummy, non-null allocation.
2664*b7893ccfSSadaf Ebrahimi
2665*b7893ccfSSadaf Ebrahimi You still need to destroy created object using vmaFreeMemory().
2666*b7893ccfSSadaf Ebrahimi
2667*b7893ccfSSadaf Ebrahimi Returned allocation is not tied to any specific memory pool or memory type and
2668*b7893ccfSSadaf Ebrahimi not bound to any image or buffer. It has size = 0. It cannot be turned into
2669*b7893ccfSSadaf Ebrahimi a real, non-empty allocation.
2670*b7893ccfSSadaf Ebrahimi */
2671*b7893ccfSSadaf Ebrahimi void vmaCreateLostAllocation(
2672*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2673*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation);
2674*b7893ccfSSadaf Ebrahimi
2675*b7893ccfSSadaf Ebrahimi /** \brief Maps memory represented by given allocation and returns pointer to it.
2676*b7893ccfSSadaf Ebrahimi
2677*b7893ccfSSadaf Ebrahimi Maps memory represented by the given allocation to make it accessible to CPU code.
2678*b7893ccfSSadaf Ebrahimi On success, `*ppData` contains a pointer to the first byte of this memory.
2679*b7893ccfSSadaf Ebrahimi If the allocation is part of a bigger `VkDeviceMemory` block, the pointer is
2680*b7893ccfSSadaf Ebrahimi correctly offset to the beginning of the region assigned to this particular
2681*b7893ccfSSadaf Ebrahimi allocation.
2682*b7893ccfSSadaf Ebrahimi
2683*b7893ccfSSadaf Ebrahimi Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
2684*b7893ccfSSadaf Ebrahimi function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
2685*b7893ccfSSadaf Ebrahimi multiple times simultaneously, it is safe to call this function on allocations
2686*b7893ccfSSadaf Ebrahimi assigned to the same memory block. The actual Vulkan memory will be mapped on the first
2687*b7893ccfSSadaf Ebrahimi mapping and unmapped on the last unmapping.
2688*b7893ccfSSadaf Ebrahimi
2689*b7893ccfSSadaf Ebrahimi If the function succeeded, you must call vmaUnmapMemory() to unmap the
2690*b7893ccfSSadaf Ebrahimi allocation when mapping is no longer needed or before freeing the allocation, at
2691*b7893ccfSSadaf Ebrahimi the latest.
2692*b7893ccfSSadaf Ebrahimi
2693*b7893ccfSSadaf Ebrahimi It is also safe to call this function multiple times on the same allocation. You
2694*b7893ccfSSadaf Ebrahimi must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
2695*b7893ccfSSadaf Ebrahimi
2696*b7893ccfSSadaf Ebrahimi It is also safe to call this function on an allocation created with the
2697*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
2698*b7893ccfSSadaf Ebrahimi You must still call vmaUnmapMemory() the same number of times as you called
2699*b7893ccfSSadaf Ebrahimi vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
2700*b7893ccfSSadaf Ebrahimi "0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
2701*b7893ccfSSadaf Ebrahimi
2702*b7893ccfSSadaf Ebrahimi This function fails when used on an allocation made in a memory type that is not
2703*b7893ccfSSadaf Ebrahimi `HOST_VISIBLE`.
2704*b7893ccfSSadaf Ebrahimi
2705*b7893ccfSSadaf Ebrahimi This function always fails when called for an allocation that was created with the
2706*b7893ccfSSadaf Ebrahimi #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
2707*b7893ccfSSadaf Ebrahimi mapped.
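
Typical pattern for uploading data through a `HOST_VISIBLE` allocation (a sketch; `srcData`/`srcDataSize` are placeholders, and vmaFlushAllocation() is ignored automatically for `HOST_COHERENT` memory):

\code
void* mappedData;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}
\endcode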
2708*b7893ccfSSadaf Ebrahimi */
2709*b7893ccfSSadaf Ebrahimi VkResult vmaMapMemory(
2710*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2711*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
2712*b7893ccfSSadaf Ebrahimi void** ppData);
2713*b7893ccfSSadaf Ebrahimi
2714*b7893ccfSSadaf Ebrahimi /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
2715*b7893ccfSSadaf Ebrahimi
2716*b7893ccfSSadaf Ebrahimi For details, see description of vmaMapMemory().
2717*b7893ccfSSadaf Ebrahimi */
2718*b7893ccfSSadaf Ebrahimi void vmaUnmapMemory(
2719*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2720*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
2721*b7893ccfSSadaf Ebrahimi
2722*b7893ccfSSadaf Ebrahimi /** \brief Flushes memory of given allocation.
2723*b7893ccfSSadaf Ebrahimi
2724*b7893ccfSSadaf Ebrahimi Calls `vkFlushMappedMemoryRanges()` for memory associated with the given range of the given allocation.
2725*b7893ccfSSadaf Ebrahimi
2726*b7893ccfSSadaf Ebrahimi - `offset` must be relative to the beginning of the allocation.
2727*b7893ccfSSadaf Ebrahimi - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
2728*b7893ccfSSadaf Ebrahimi - `offset` and `size` don't have to be aligned.
2729*b7893ccfSSadaf Ebrahimi They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2730*b7893ccfSSadaf Ebrahimi - If `size` is 0, this call is ignored.
2731*b7893ccfSSadaf Ebrahimi - If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or is `HOST_COHERENT`,
2732*b7893ccfSSadaf Ebrahimi this call is ignored.
2733*b7893ccfSSadaf Ebrahimi */
2734*b7893ccfSSadaf Ebrahimi void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2735*b7893ccfSSadaf Ebrahimi
2736*b7893ccfSSadaf Ebrahimi /** \brief Invalidates memory of given allocation.
2737*b7893ccfSSadaf Ebrahimi
2738*b7893ccfSSadaf Ebrahimi Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given range of the given allocation.
2739*b7893ccfSSadaf Ebrahimi
2740*b7893ccfSSadaf Ebrahimi - `offset` must be relative to the beginning of the allocation.
2741*b7893ccfSSadaf Ebrahimi - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
2742*b7893ccfSSadaf Ebrahimi - `offset` and `size` don't have to be aligned.
2743*b7893ccfSSadaf Ebrahimi They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2744*b7893ccfSSadaf Ebrahimi - If `size` is 0, this call is ignored.
2745*b7893ccfSSadaf Ebrahimi - If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or is `HOST_COHERENT`,
2746*b7893ccfSSadaf Ebrahimi this call is ignored.
2747*b7893ccfSSadaf Ebrahimi */
2748*b7893ccfSSadaf Ebrahimi void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2749*b7893ccfSSadaf Ebrahimi
2750*b7893ccfSSadaf Ebrahimi /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
2751*b7893ccfSSadaf Ebrahimi
2752*b7893ccfSSadaf Ebrahimi @param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
2753*b7893ccfSSadaf Ebrahimi
2754*b7893ccfSSadaf Ebrahimi Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
2755*b7893ccfSSadaf Ebrahimi `VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
2756*b7893ccfSSadaf Ebrahimi `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2757*b7893ccfSSadaf Ebrahimi
2758*b7893ccfSSadaf Ebrahimi Possible return values:
2759*b7893ccfSSadaf Ebrahimi
2760*b7893ccfSSadaf Ebrahimi - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
2761*b7893ccfSSadaf Ebrahimi - `VK_SUCCESS` - corruption detection has been performed and succeeded.
2762*b7893ccfSSadaf Ebrahimi - `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
2763*b7893ccfSSadaf Ebrahimi `VMA_ASSERT` is also fired in that case.
2764*b7893ccfSSadaf Ebrahimi - Other value: Error returned by Vulkan, e.g. memory mapping failure.
2765*b7893ccfSSadaf Ebrahimi */
2766*b7893ccfSSadaf Ebrahimi VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2767*b7893ccfSSadaf Ebrahimi
2768*b7893ccfSSadaf Ebrahimi /** \struct VmaDefragmentationContext
2769*b7893ccfSSadaf Ebrahimi \brief Opaque object that represents a started defragmentation process.
2770*b7893ccfSSadaf Ebrahimi
2771*b7893ccfSSadaf Ebrahimi Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
2772*b7893ccfSSadaf Ebrahimi Call function vmaDefragmentationEnd() to destroy it.
2773*b7893ccfSSadaf Ebrahimi */
2774*b7893ccfSSadaf Ebrahimi VK_DEFINE_HANDLE(VmaDefragmentationContext)
2775*b7893ccfSSadaf Ebrahimi
2776*b7893ccfSSadaf Ebrahimi /// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
2777*b7893ccfSSadaf Ebrahimi typedef enum VmaDefragmentationFlagBits {
2778*b7893ccfSSadaf Ebrahimi VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2779*b7893ccfSSadaf Ebrahimi } VmaDefragmentationFlagBits;
2780*b7893ccfSSadaf Ebrahimi typedef VkFlags VmaDefragmentationFlags;
2781*b7893ccfSSadaf Ebrahimi
2782*b7893ccfSSadaf Ebrahimi /** \brief Parameters for defragmentation.
2783*b7893ccfSSadaf Ebrahimi
2784*b7893ccfSSadaf Ebrahimi To be used with function vmaDefragmentationBegin().
2785*b7893ccfSSadaf Ebrahimi */
2786*b7893ccfSSadaf Ebrahimi typedef struct VmaDefragmentationInfo2 {
2787*b7893ccfSSadaf Ebrahimi /** \brief Reserved for future use. Should be 0.
2788*b7893ccfSSadaf Ebrahimi */
2789*b7893ccfSSadaf Ebrahimi VmaDefragmentationFlags flags;
2790*b7893ccfSSadaf Ebrahimi /** \brief Number of allocations in `pAllocations` array.
2791*b7893ccfSSadaf Ebrahimi */
2792*b7893ccfSSadaf Ebrahimi uint32_t allocationCount;
2793*b7893ccfSSadaf Ebrahimi /** \brief Pointer to array of allocations that can be defragmented.
2794*b7893ccfSSadaf Ebrahimi
2795*b7893ccfSSadaf Ebrahimi The array should have `allocationCount` elements.
2796*b7893ccfSSadaf Ebrahimi The array should not contain nulls.
2797*b7893ccfSSadaf Ebrahimi Elements in the array should be unique - same allocation cannot occur twice.
2798*b7893ccfSSadaf Ebrahimi It is safe to pass allocations that are in the lost state - they are ignored.
2799*b7893ccfSSadaf Ebrahimi All allocations not present in this array are considered non-moveable during this defragmentation.
2800*b7893ccfSSadaf Ebrahimi */
2801*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations;
2802*b7893ccfSSadaf Ebrahimi /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
2803*b7893ccfSSadaf Ebrahimi
2804*b7893ccfSSadaf Ebrahimi The array should have `allocationCount` elements.
2805*b7893ccfSSadaf Ebrahimi You can pass null if you are not interested in this information.
2806*b7893ccfSSadaf Ebrahimi */
2807*b7893ccfSSadaf Ebrahimi VkBool32* pAllocationsChanged;
2808*b7893ccfSSadaf Ebrahimi /** \brief Number of pools in `pPools` array.
2809*b7893ccfSSadaf Ebrahimi */
2810*b7893ccfSSadaf Ebrahimi uint32_t poolCount;
2811*b7893ccfSSadaf Ebrahimi /** \brief Either null or pointer to array of pools to be defragmented.
2812*b7893ccfSSadaf Ebrahimi
2813*b7893ccfSSadaf Ebrahimi All the allocations in the specified pools can be moved during defragmentation
2814*b7893ccfSSadaf Ebrahimi and there is no way to check if they were really moved as in `pAllocationsChanged`,
2815*b7893ccfSSadaf Ebrahimi so you must query all the allocations in all these pools for new `VkDeviceMemory`
2816*b7893ccfSSadaf Ebrahimi and offset using vmaGetAllocationInfo() if you might need to recreate buffers
2817*b7893ccfSSadaf Ebrahimi and images bound to them.
2818*b7893ccfSSadaf Ebrahimi
2819*b7893ccfSSadaf Ebrahimi The array should have `poolCount` elements.
2820*b7893ccfSSadaf Ebrahimi The array should not contain nulls.
2821*b7893ccfSSadaf Ebrahimi Elements in the array should be unique - same pool cannot occur twice.
2822*b7893ccfSSadaf Ebrahimi
2823*b7893ccfSSadaf Ebrahimi Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
2824*b7893ccfSSadaf Ebrahimi It might be more efficient.
2825*b7893ccfSSadaf Ebrahimi */
2826*b7893ccfSSadaf Ebrahimi VmaPool* pPools;
2827*b7893ccfSSadaf Ebrahimi /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
2828*b7893ccfSSadaf Ebrahimi
2829*b7893ccfSSadaf Ebrahimi `VK_WHOLE_SIZE` means no limit.
2830*b7893ccfSSadaf Ebrahimi */
2831*b7893ccfSSadaf Ebrahimi VkDeviceSize maxCpuBytesToMove;
2832*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
2833*b7893ccfSSadaf Ebrahimi
2834*b7893ccfSSadaf Ebrahimi `UINT32_MAX` means no limit.
2835*b7893ccfSSadaf Ebrahimi */
2836*b7893ccfSSadaf Ebrahimi uint32_t maxCpuAllocationsToMove;
2837*b7893ccfSSadaf Ebrahimi /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
2838*b7893ccfSSadaf Ebrahimi
2839*b7893ccfSSadaf Ebrahimi `VK_WHOLE_SIZE` means no limit.
2840*b7893ccfSSadaf Ebrahimi */
2841*b7893ccfSSadaf Ebrahimi VkDeviceSize maxGpuBytesToMove;
2842*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
2843*b7893ccfSSadaf Ebrahimi
2844*b7893ccfSSadaf Ebrahimi `UINT32_MAX` means no limit.
2845*b7893ccfSSadaf Ebrahimi */
2846*b7893ccfSSadaf Ebrahimi uint32_t maxGpuAllocationsToMove;
2847*b7893ccfSSadaf Ebrahimi /** \brief Optional. Command buffer where GPU copy commands will be posted.
2848*b7893ccfSSadaf Ebrahimi
2849*b7893ccfSSadaf Ebrahimi If not null, it must be a valid command buffer handle that supports Transfer queue type.
2850*b7893ccfSSadaf Ebrahimi It must be in the recording state and outside of a render pass instance.
2851*b7893ccfSSadaf Ebrahimi You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
2852*b7893ccfSSadaf Ebrahimi
2853*b7893ccfSSadaf Ebrahimi Passing null means that only CPU defragmentation will be performed.
2854*b7893ccfSSadaf Ebrahimi */
2855*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer;
2856*b7893ccfSSadaf Ebrahimi } VmaDefragmentationInfo2;
2857*b7893ccfSSadaf Ebrahimi
2858*b7893ccfSSadaf Ebrahimi /** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
2859*b7893ccfSSadaf Ebrahimi
2860*b7893ccfSSadaf Ebrahimi \deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
2861*b7893ccfSSadaf Ebrahimi */
2862*b7893ccfSSadaf Ebrahimi typedef struct VmaDefragmentationInfo {
2863*b7893ccfSSadaf Ebrahimi /** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
2864*b7893ccfSSadaf Ebrahimi
2865*b7893ccfSSadaf Ebrahimi Default is `VK_WHOLE_SIZE`, which means no limit.
2866*b7893ccfSSadaf Ebrahimi */
2867*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove;
2868*b7893ccfSSadaf Ebrahimi /** \brief Maximum number of allocations that can be moved to a different place.
2869*b7893ccfSSadaf Ebrahimi
2870*b7893ccfSSadaf Ebrahimi Default is `UINT32_MAX`, which means no limit.
2871*b7893ccfSSadaf Ebrahimi */
2872*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove;
2873*b7893ccfSSadaf Ebrahimi } VmaDefragmentationInfo;
2874*b7893ccfSSadaf Ebrahimi
2875*b7893ccfSSadaf Ebrahimi /** \brief Statistics returned by function vmaDefragment(). */
2876*b7893ccfSSadaf Ebrahimi typedef struct VmaDefragmentationStats {
2877*b7893ccfSSadaf Ebrahimi /// Total number of bytes that have been copied while moving allocations to different places.
2878*b7893ccfSSadaf Ebrahimi VkDeviceSize bytesMoved;
2879*b7893ccfSSadaf Ebrahimi /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
2880*b7893ccfSSadaf Ebrahimi VkDeviceSize bytesFreed;
2881*b7893ccfSSadaf Ebrahimi /// Number of allocations that have been moved to different places.
2882*b7893ccfSSadaf Ebrahimi uint32_t allocationsMoved;
2883*b7893ccfSSadaf Ebrahimi /// Number of empty `VkDeviceMemory` objects that have been released to the system.
2884*b7893ccfSSadaf Ebrahimi uint32_t deviceMemoryBlocksFreed;
2885*b7893ccfSSadaf Ebrahimi } VmaDefragmentationStats;
2886*b7893ccfSSadaf Ebrahimi
2887*b7893ccfSSadaf Ebrahimi /** \brief Begins defragmentation process.
2888*b7893ccfSSadaf Ebrahimi
2889*b7893ccfSSadaf Ebrahimi @param allocator Allocator object.
2890*b7893ccfSSadaf Ebrahimi @param pInfo Structure filled with parameters of defragmentation.
2891*b7893ccfSSadaf Ebrahimi @param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
2892*b7893ccfSSadaf Ebrahimi @param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
2893*b7893ccfSSadaf Ebrahimi @return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
2894*b7893ccfSSadaf Ebrahimi
2895*b7893ccfSSadaf Ebrahimi Use this function instead of old, deprecated vmaDefragment().
2896*b7893ccfSSadaf Ebrahimi
2897*b7893ccfSSadaf Ebrahimi Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd():
2898*b7893ccfSSadaf Ebrahimi
2899*b7893ccfSSadaf Ebrahimi - You should not use any of the allocations passed as `pInfo->pAllocations` or
2900*b7893ccfSSadaf Ebrahimi any allocations that belong to pools passed as `pInfo->pPools`,
2901*b7893ccfSSadaf Ebrahimi including calling vmaGetAllocationInfo() or vmaTouchAllocation(), or accessing
2902*b7893ccfSSadaf Ebrahimi their data.
2903*b7893ccfSSadaf Ebrahimi - Some mutexes protecting internal data structures may be locked, so trying to
2904*b7893ccfSSadaf Ebrahimi make or free any allocations, bind buffers or images, map memory, or launch
2905*b7893ccfSSadaf Ebrahimi another simultaneous defragmentation in between may cause stall (when done on
2906*b7893ccfSSadaf Ebrahimi another thread) or deadlock (when done on the same thread), unless you are
2907*b7893ccfSSadaf Ebrahimi 100% sure that defragmented allocations are in different pools.
2908*b7893ccfSSadaf Ebrahimi - Information returned via `pStats` and `pInfo->pAllocationsChanged` is undefined.
2909*b7893ccfSSadaf Ebrahimi It becomes valid after the call to vmaDefragmentationEnd().
2910*b7893ccfSSadaf Ebrahimi - If `pInfo->commandBuffer` is not null, you must submit that command buffer
2911*b7893ccfSSadaf Ebrahimi and make sure it finished execution before calling vmaDefragmentationEnd().
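
Example of CPU-only defragmentation of a set of allocations (a sketch; `allocations` and `allocCount` are assumed to be an existing array and its element count, and resources whose entry in `allocationsChanged` becomes `VK_TRUE` must be recreated and rebound afterwards):

\code
VkBool32 allocationsChanged[ALLOC_COUNT]; // ALLOC_COUNT: placeholder, equal to allocCount.

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.pAllocationsChanged = allocationsChanged;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
// commandBuffer left null - CPU-side defragmentation only.

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
// vmaDefragmentationEnd() is safe to call even if *pContext came back null.
vmaDefragmentationEnd(allocator, defragCtx);
\endcode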
2912*b7893ccfSSadaf Ebrahimi */
2913*b7893ccfSSadaf Ebrahimi VkResult vmaDefragmentationBegin(
2914*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2915*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2* pInfo,
2916*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
2917*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext *pContext);
2918*b7893ccfSSadaf Ebrahimi
2919*b7893ccfSSadaf Ebrahimi /** \brief Ends defragmentation process.
2920*b7893ccfSSadaf Ebrahimi
2921*b7893ccfSSadaf Ebrahimi Use this function to finish defragmentation started by vmaDefragmentationBegin().
2922*b7893ccfSSadaf Ebrahimi It is safe to pass `context == null`. The function then does nothing.
2923*b7893ccfSSadaf Ebrahimi */
2924*b7893ccfSSadaf Ebrahimi VkResult vmaDefragmentationEnd(
2925*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2926*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext context);
2927*b7893ccfSSadaf Ebrahimi
2928*b7893ccfSSadaf Ebrahimi /** \brief Deprecated. Compacts memory by moving allocations.
2929*b7893ccfSSadaf Ebrahimi
2930*b7893ccfSSadaf Ebrahimi @param pAllocations Array of allocations that can be moved during this compaction.
2931*b7893ccfSSadaf Ebrahimi @param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
2932*b7893ccfSSadaf Ebrahimi @param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information.
2933*b7893ccfSSadaf Ebrahimi @param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
2934*b7893ccfSSadaf Ebrahimi @param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
2935*b7893ccfSSadaf Ebrahimi @return `VK_SUCCESS` if completed, negative error code in case of error.
2936*b7893ccfSSadaf Ebrahimi
2937*b7893ccfSSadaf Ebrahimi \deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
2938*b7893ccfSSadaf Ebrahimi
2939*b7893ccfSSadaf Ebrahimi This function works by moving allocations to different places (different
2940*b7893ccfSSadaf Ebrahimi `VkDeviceMemory` objects and/or different offsets) in order to optimize memory
2941*b7893ccfSSadaf Ebrahimi usage. Only allocations that are in `pAllocations` array can be moved. All other
2942*b7893ccfSSadaf Ebrahimi allocations are considered nonmovable in this call. Basic rules:
2943*b7893ccfSSadaf Ebrahimi
2944*b7893ccfSSadaf Ebrahimi - Only allocations made in memory types that have
2945*b7893ccfSSadaf Ebrahimi `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`
2946*b7893ccfSSadaf Ebrahimi flags can be compacted. You may pass other allocations but it makes no sense -
2947*b7893ccfSSadaf Ebrahimi these will never be moved.
2948*b7893ccfSSadaf Ebrahimi - Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or
2949*b7893ccfSSadaf Ebrahimi #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations
2950*b7893ccfSSadaf Ebrahimi passed to this function that come from such pools are ignored.
2951*b7893ccfSSadaf Ebrahimi - Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or
2952*b7893ccfSSadaf Ebrahimi created as dedicated allocations for any other reason are also ignored.
2953*b7893ccfSSadaf Ebrahimi - Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT
2954*b7893ccfSSadaf Ebrahimi flag can be compacted. If not persistently mapped, memory will be mapped
2955*b7893ccfSSadaf Ebrahimi temporarily inside this function if needed.
2956*b7893ccfSSadaf Ebrahimi - You must not pass same #VmaAllocation object multiple times in `pAllocations` array.
2957*b7893ccfSSadaf Ebrahimi
2958*b7893ccfSSadaf Ebrahimi The function also frees empty `VkDeviceMemory` blocks.
2959*b7893ccfSSadaf Ebrahimi
2960*b7893ccfSSadaf Ebrahimi Warning: This function may be time-consuming, so you shouldn't call it too often
2961*b7893ccfSSadaf Ebrahimi (like after every resource creation/destruction).
2962*b7893ccfSSadaf Ebrahimi You can call it on special occasions (like when reloading a game level or
2963*b7893ccfSSadaf Ebrahimi when you just destroyed a lot of objects). Calling it every frame may be OK, but
2964*b7893ccfSSadaf Ebrahimi you should measure that on your platform.
2965*b7893ccfSSadaf Ebrahimi
2966*b7893ccfSSadaf Ebrahimi For more information, see [Defragmentation](@ref defragmentation) chapter.
2967*b7893ccfSSadaf Ebrahimi */
2968*b7893ccfSSadaf Ebrahimi VkResult vmaDefragment(
2969*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2970*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
2971*b7893ccfSSadaf Ebrahimi size_t allocationCount,
2972*b7893ccfSSadaf Ebrahimi VkBool32* pAllocationsChanged,
2973*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo *pDefragmentationInfo,
2974*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pDefragmentationStats);
2975*b7893ccfSSadaf Ebrahimi
2976*b7893ccfSSadaf Ebrahimi /** \brief Binds buffer to allocation.
2977*b7893ccfSSadaf Ebrahimi
2978*b7893ccfSSadaf Ebrahimi Binds specified buffer to region of memory represented by specified allocation.
2979*b7893ccfSSadaf Ebrahimi Gets `VkDeviceMemory` handle and offset from the allocation.
2980*b7893ccfSSadaf Ebrahimi If you want to create a buffer, allocate memory for it and bind them together separately,
2981*b7893ccfSSadaf Ebrahimi you should use this function for binding instead of standard `vkBindBufferMemory()`,
2982*b7893ccfSSadaf Ebrahimi because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2983*b7893ccfSSadaf Ebrahimi allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2984*b7893ccfSSadaf Ebrahimi (which is illegal in Vulkan).
2985*b7893ccfSSadaf Ebrahimi
2986*b7893ccfSSadaf Ebrahimi It is recommended to use function vmaCreateBuffer() instead of this one.
2987*b7893ccfSSadaf Ebrahimi */
2988*b7893ccfSSadaf Ebrahimi VkResult vmaBindBufferMemory(
2989*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
2990*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
2991*b7893ccfSSadaf Ebrahimi VkBuffer buffer);
2992*b7893ccfSSadaf Ebrahimi
2993*b7893ccfSSadaf Ebrahimi /** \brief Binds image to allocation.
2994*b7893ccfSSadaf Ebrahimi
2995*b7893ccfSSadaf Ebrahimi Binds specified image to region of memory represented by specified allocation.
2996*b7893ccfSSadaf Ebrahimi Gets `VkDeviceMemory` handle and offset from the allocation.
2997*b7893ccfSSadaf Ebrahimi If you want to create an image, allocate memory for it and bind them together separately,
2998*b7893ccfSSadaf Ebrahimi you should use this function for binding instead of standard `vkBindImageMemory()`,
2999*b7893ccfSSadaf Ebrahimi because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
3000*b7893ccfSSadaf Ebrahimi allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
3001*b7893ccfSSadaf Ebrahimi (which is illegal in Vulkan).
3002*b7893ccfSSadaf Ebrahimi
3003*b7893ccfSSadaf Ebrahimi It is recommended to use function vmaCreateImage() instead of this one.
3004*b7893ccfSSadaf Ebrahimi */
3005*b7893ccfSSadaf Ebrahimi VkResult vmaBindImageMemory(
3006*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
3007*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
3008*b7893ccfSSadaf Ebrahimi VkImage image);
3009*b7893ccfSSadaf Ebrahimi
3010*b7893ccfSSadaf Ebrahimi /**
3011*b7893ccfSSadaf Ebrahimi @param[out] pBuffer Buffer that was created.
3012*b7893ccfSSadaf Ebrahimi @param[out] pAllocation Allocation that was created.
3013*b7893ccfSSadaf Ebrahimi @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
3014*b7893ccfSSadaf Ebrahimi
3015*b7893ccfSSadaf Ebrahimi This function automatically:
3016*b7893ccfSSadaf Ebrahimi
3017*b7893ccfSSadaf Ebrahimi -# Creates buffer.
3018*b7893ccfSSadaf Ebrahimi -# Allocates appropriate memory for it.
3019*b7893ccfSSadaf Ebrahimi -# Binds the buffer with the memory.
3020*b7893ccfSSadaf Ebrahimi
3021*b7893ccfSSadaf Ebrahimi If any of these operations fail, buffer and allocation are not created,
3022*b7893ccfSSadaf Ebrahimi the returned value is a negative error code, and *pBuffer and *pAllocation are null.
3023*b7893ccfSSadaf Ebrahimi
3024*b7893ccfSSadaf Ebrahimi If the function succeeded, you must destroy both buffer and allocation when you
3025*b7893ccfSSadaf Ebrahimi no longer need them using either convenience function vmaDestroyBuffer() or
3026*b7893ccfSSadaf Ebrahimi separately, using `vkDestroyBuffer()` and vmaFreeMemory().
3027*b7893ccfSSadaf Ebrahimi
3028*b7893ccfSSadaf Ebrahimi If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
3029*b7893ccfSSadaf Ebrahimi VK_KHR_dedicated_allocation extension is used internally to query driver whether
3030*b7893ccfSSadaf Ebrahimi it requires or prefers the new buffer to have dedicated allocation. If yes,
3031*b7893ccfSSadaf Ebrahimi and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
3032*b7893ccfSSadaf Ebrahimi and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
3033*b7893ccfSSadaf Ebrahimi allocation for this buffer, just like when using
3034*b7893ccfSSadaf Ebrahimi VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
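
Basic example (a sketch, assuming a valid `allocator`; sizes and usage flags are placeholders):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL);
// When no longer needed:
// vmaDestroyBuffer(allocator, buffer, allocation);
\endcode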
3035*b7893ccfSSadaf Ebrahimi */
3036*b7893ccfSSadaf Ebrahimi VkResult vmaCreateBuffer(
3037*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
3038*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo* pBufferCreateInfo,
3039*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
3040*b7893ccfSSadaf Ebrahimi VkBuffer* pBuffer,
3041*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
3042*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
3043*b7893ccfSSadaf Ebrahimi
3044*b7893ccfSSadaf Ebrahimi /** \brief Destroys Vulkan buffer and frees allocated memory.
3045*b7893ccfSSadaf Ebrahimi
3046*b7893ccfSSadaf Ebrahimi This is just a convenience function equivalent to:
3047*b7893ccfSSadaf Ebrahimi
3048*b7893ccfSSadaf Ebrahimi \code
3049*b7893ccfSSadaf Ebrahimi vkDestroyBuffer(device, buffer, allocationCallbacks);
3050*b7893ccfSSadaf Ebrahimi vmaFreeMemory(allocator, allocation);
3051*b7893ccfSSadaf Ebrahimi \endcode
3052*b7893ccfSSadaf Ebrahimi
3053*b7893ccfSSadaf Ebrahimi It is safe to pass null as buffer and/or allocation.
3054*b7893ccfSSadaf Ebrahimi */
3055*b7893ccfSSadaf Ebrahimi void vmaDestroyBuffer(
3056*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
3057*b7893ccfSSadaf Ebrahimi VkBuffer buffer,
3058*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
3059*b7893ccfSSadaf Ebrahimi
3060*b7893ccfSSadaf Ebrahimi /// Function similar to vmaCreateBuffer().
3061*b7893ccfSSadaf Ebrahimi VkResult vmaCreateImage(
3062*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
3063*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo* pImageCreateInfo,
3064*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
3065*b7893ccfSSadaf Ebrahimi VkImage* pImage,
3066*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
3067*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo);
3068*b7893ccfSSadaf Ebrahimi
3069*b7893ccfSSadaf Ebrahimi /** \brief Destroys Vulkan image and frees allocated memory.
3070*b7893ccfSSadaf Ebrahimi
3071*b7893ccfSSadaf Ebrahimi This is just a convenience function equivalent to:
3072*b7893ccfSSadaf Ebrahimi
3073*b7893ccfSSadaf Ebrahimi \code
3074*b7893ccfSSadaf Ebrahimi vkDestroyImage(device, image, allocationCallbacks);
3075*b7893ccfSSadaf Ebrahimi vmaFreeMemory(allocator, allocation);
3076*b7893ccfSSadaf Ebrahimi \endcode
3077*b7893ccfSSadaf Ebrahimi
3078*b7893ccfSSadaf Ebrahimi It is safe to pass null as image and/or allocation.
3079*b7893ccfSSadaf Ebrahimi */
3080*b7893ccfSSadaf Ebrahimi void vmaDestroyImage(
3081*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
3082*b7893ccfSSadaf Ebrahimi VkImage image,
3083*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
3084*b7893ccfSSadaf Ebrahimi
3085*b7893ccfSSadaf Ebrahimi #ifdef __cplusplus
3086*b7893ccfSSadaf Ebrahimi }
3087*b7893ccfSSadaf Ebrahimi #endif
3088*b7893ccfSSadaf Ebrahimi
3089*b7893ccfSSadaf Ebrahimi #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3090*b7893ccfSSadaf Ebrahimi
3091*b7893ccfSSadaf Ebrahimi // For Visual Studio IntelliSense.
3092*b7893ccfSSadaf Ebrahimi #if defined(__cplusplus) && defined(__INTELLISENSE__)
3093*b7893ccfSSadaf Ebrahimi #define VMA_IMPLEMENTATION
3094*b7893ccfSSadaf Ebrahimi #endif
3095*b7893ccfSSadaf Ebrahimi
3096*b7893ccfSSadaf Ebrahimi #ifdef VMA_IMPLEMENTATION
3097*b7893ccfSSadaf Ebrahimi #undef VMA_IMPLEMENTATION
3098*b7893ccfSSadaf Ebrahimi
3099*b7893ccfSSadaf Ebrahimi #include <cstdint>
3100*b7893ccfSSadaf Ebrahimi #include <cstdlib>
3101*b7893ccfSSadaf Ebrahimi #include <cstring>
3102*b7893ccfSSadaf Ebrahimi
3103*b7893ccfSSadaf Ebrahimi /*******************************************************************************
3104*b7893ccfSSadaf Ebrahimi CONFIGURATION SECTION
3105*b7893ccfSSadaf Ebrahimi
3106*b7893ccfSSadaf Ebrahimi Define some of these macros before each #include of this header or change them
3107*b7893ccfSSadaf Ebrahimi here if you need behavior other than the default, depending on your environment.
3108*b7893ccfSSadaf Ebrahimi */
3109*b7893ccfSSadaf Ebrahimi
3110*b7893ccfSSadaf Ebrahimi /*
3111*b7893ccfSSadaf Ebrahimi Define this macro to 1 to make the library fetch pointers to Vulkan functions
3112*b7893ccfSSadaf Ebrahimi internally, like:
3113*b7893ccfSSadaf Ebrahimi
3114*b7893ccfSSadaf Ebrahimi vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3115*b7893ccfSSadaf Ebrahimi
3116*b7893ccfSSadaf Ebrahimi Define to 0 if you are going to provide your own pointers to Vulkan functions via
3117*b7893ccfSSadaf Ebrahimi VmaAllocatorCreateInfo::pVulkanFunctions.
3118*b7893ccfSSadaf Ebrahimi */
3119*b7893ccfSSadaf Ebrahimi #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3120*b7893ccfSSadaf Ebrahimi #define VMA_STATIC_VULKAN_FUNCTIONS 1
3121*b7893ccfSSadaf Ebrahimi #endif
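
/*
Example of providing your own function pointers instead (a sketch, not part of the
original header; only a few members of VmaVulkanFunctions are shown - fill all of
them in real code, e.g. with pointers obtained from vkGetInstanceProcAddr() /
vkGetDeviceProcAddr(); the my*Ptr names are placeholders):

    #define VMA_STATIC_VULKAN_FUNCTIONS 0
    #include "vk_mem_alloc.h"

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = myVkAllocateMemoryPtr;
    vulkanFunctions.vkFreeMemory     = myVkFreeMemoryPtr;
    // ... assign the remaining members ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/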
3122*b7893ccfSSadaf Ebrahimi
3123*b7893ccfSSadaf Ebrahimi // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3124*b7893ccfSSadaf Ebrahimi //#define VMA_USE_STL_CONTAINERS 1
3125*b7893ccfSSadaf Ebrahimi
3126*b7893ccfSSadaf Ebrahimi /* Set this macro to 1 to make the library include and use STL containers:
3127*b7893ccfSSadaf Ebrahimi std::pair, std::vector, std::list, std::unordered_map.
3128*b7893ccfSSadaf Ebrahimi
3129*b7893ccfSSadaf Ebrahimi Set it to 0 or leave it undefined to make the library use its own implementation of
3130*b7893ccfSSadaf Ebrahimi the containers.
3131*b7893ccfSSadaf Ebrahimi */
3132*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_CONTAINERS
3133*b7893ccfSSadaf Ebrahimi #define VMA_USE_STL_VECTOR 1
3134*b7893ccfSSadaf Ebrahimi #define VMA_USE_STL_UNORDERED_MAP 1
3135*b7893ccfSSadaf Ebrahimi #define VMA_USE_STL_LIST 1
3136*b7893ccfSSadaf Ebrahimi #endif
3137*b7893ccfSSadaf Ebrahimi
3138*b7893ccfSSadaf Ebrahimi #ifndef VMA_USE_STL_SHARED_MUTEX
3139*b7893ccfSSadaf Ebrahimi // Minimum Visual Studio 2015 Update 2
3140*b7893ccfSSadaf Ebrahimi #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2
3141*b7893ccfSSadaf Ebrahimi #define VMA_USE_STL_SHARED_MUTEX 1
3142*b7893ccfSSadaf Ebrahimi #endif
3143*b7893ccfSSadaf Ebrahimi #endif
3144*b7893ccfSSadaf Ebrahimi
3145*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_VECTOR
3146*b7893ccfSSadaf Ebrahimi #include <vector>
3147*b7893ccfSSadaf Ebrahimi #endif
3148*b7893ccfSSadaf Ebrahimi
3149*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_UNORDERED_MAP
3150*b7893ccfSSadaf Ebrahimi #include <unordered_map>
3151*b7893ccfSSadaf Ebrahimi #endif
3152*b7893ccfSSadaf Ebrahimi
3153*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_LIST
3154*b7893ccfSSadaf Ebrahimi #include <list>
3155*b7893ccfSSadaf Ebrahimi #endif
3156*b7893ccfSSadaf Ebrahimi
3157*b7893ccfSSadaf Ebrahimi /*
3158*b7893ccfSSadaf Ebrahimi The following headers are used in this CONFIGURATION section only, so feel free to
3159*b7893ccfSSadaf Ebrahimi remove them if not needed.
3160*b7893ccfSSadaf Ebrahimi */
3161*b7893ccfSSadaf Ebrahimi #include <cassert> // for assert
3162*b7893ccfSSadaf Ebrahimi #include <algorithm> // for min, max
3163*b7893ccfSSadaf Ebrahimi #include <mutex>
3164*b7893ccfSSadaf Ebrahimi #include <atomic> // for std::atomic
3165*b7893ccfSSadaf Ebrahimi
3166*b7893ccfSSadaf Ebrahimi #ifndef VMA_NULL
3167*b7893ccfSSadaf Ebrahimi // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3168*b7893ccfSSadaf Ebrahimi #define VMA_NULL nullptr
3169*b7893ccfSSadaf Ebrahimi #endif
3170*b7893ccfSSadaf Ebrahimi
3171*b7893ccfSSadaf Ebrahimi #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3172*b7893ccfSSadaf Ebrahimi #include <cstdlib>
3173*b7893ccfSSadaf Ebrahimi void *aligned_alloc(size_t alignment, size_t size)
3174*b7893ccfSSadaf Ebrahimi {
3175*b7893ccfSSadaf Ebrahimi // alignment must be >= sizeof(void*)
3176*b7893ccfSSadaf Ebrahimi if(alignment < sizeof(void*))
3177*b7893ccfSSadaf Ebrahimi {
3178*b7893ccfSSadaf Ebrahimi alignment = sizeof(void*);
3179*b7893ccfSSadaf Ebrahimi }
3180*b7893ccfSSadaf Ebrahimi
3181*b7893ccfSSadaf Ebrahimi return memalign(alignment, size);
3182*b7893ccfSSadaf Ebrahimi }
3183*b7893ccfSSadaf Ebrahimi #elif defined(__APPLE__) || defined(__ANDROID__)
3184*b7893ccfSSadaf Ebrahimi # define ALIGNED_ALLOC_WITH_POSIX_MEMALIGN
3185*b7893ccfSSadaf Ebrahimi #elif defined(__GNU_LIBRARY__)
3186*b7893ccfSSadaf Ebrahimi # if !defined(__GLIBC_PREREQ) || !__GLIBC_PREREQ(2, 16)
3187*b7893ccfSSadaf Ebrahimi // aligned_alloc() is defined in glibc only for version >= 2.16
3188*b7893ccfSSadaf Ebrahimi # define ALIGNED_ALLOC_WITH_POSIX_MEMALIGN
3189*b7893ccfSSadaf Ebrahimi # endif
3190*b7893ccfSSadaf Ebrahimi #endif
3191*b7893ccfSSadaf Ebrahimi
3192*b7893ccfSSadaf Ebrahimi #ifdef ALIGNED_ALLOC_WITH_POSIX_MEMALIGN
3193*b7893ccfSSadaf Ebrahimi #include <cstdlib>
3194*b7893ccfSSadaf Ebrahimi void *aligned_alloc(size_t alignment, size_t size)
3195*b7893ccfSSadaf Ebrahimi {
3196*b7893ccfSSadaf Ebrahimi // alignment must be >= sizeof(void*)
3197*b7893ccfSSadaf Ebrahimi if(alignment < sizeof(void*))
3198*b7893ccfSSadaf Ebrahimi {
3199*b7893ccfSSadaf Ebrahimi alignment = sizeof(void*);
3200*b7893ccfSSadaf Ebrahimi }
3201*b7893ccfSSadaf Ebrahimi
3202*b7893ccfSSadaf Ebrahimi void *pointer;
3203*b7893ccfSSadaf Ebrahimi if(posix_memalign(&pointer, alignment, size) == 0)
3204*b7893ccfSSadaf Ebrahimi return pointer;
3205*b7893ccfSSadaf Ebrahimi return VMA_NULL;
3206*b7893ccfSSadaf Ebrahimi }
3207*b7893ccfSSadaf Ebrahimi #endif
3208*b7893ccfSSadaf Ebrahimi
3209*b7893ccfSSadaf Ebrahimi // If your compiler is not compatible with C++11 and the definition of the
3210*b7893ccfSSadaf Ebrahimi // aligned_alloc() function is missing, uncommenting the following line may help:
3211*b7893ccfSSadaf Ebrahimi
3212*b7893ccfSSadaf Ebrahimi //#include <malloc.h>
3213*b7893ccfSSadaf Ebrahimi
3214*b7893ccfSSadaf Ebrahimi // Normal assert to check for programmer's errors, especially in Debug configuration.
3215*b7893ccfSSadaf Ebrahimi #ifndef VMA_ASSERT
3216*b7893ccfSSadaf Ebrahimi #ifdef _DEBUG
3217*b7893ccfSSadaf Ebrahimi #define VMA_ASSERT(expr) assert(expr)
3218*b7893ccfSSadaf Ebrahimi #else
3219*b7893ccfSSadaf Ebrahimi #define VMA_ASSERT(expr)
3220*b7893ccfSSadaf Ebrahimi #endif
3221*b7893ccfSSadaf Ebrahimi #endif
3222*b7893ccfSSadaf Ebrahimi
3223*b7893ccfSSadaf Ebrahimi // Assert that will be called very often, e.g. inside data structures such as operator[].
3224*b7893ccfSSadaf Ebrahimi // Making it non-empty can make the program slow.
3225*b7893ccfSSadaf Ebrahimi #ifndef VMA_HEAVY_ASSERT
3226*b7893ccfSSadaf Ebrahimi #ifdef _DEBUG
3227*b7893ccfSSadaf Ebrahimi #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3228*b7893ccfSSadaf Ebrahimi #else
3229*b7893ccfSSadaf Ebrahimi #define VMA_HEAVY_ASSERT(expr)
3230*b7893ccfSSadaf Ebrahimi #endif
3231*b7893ccfSSadaf Ebrahimi #endif
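// Illustrative sketch (not part of the original file): both assert macros can be
// overridden by defining them before this header is included, for example:
/*
#include <cstdio>
#include <cstdlib>

#define VMA_ASSERT(expr) do { if(!(expr)) { \
    std::fprintf(stderr, "VMA assertion failed: %s\n", #expr); \
    std::abort(); } } while(false)
#define VMA_HEAVY_ASSERT(expr)   // keep the very frequent asserts disabled for speed

#include "vk_mem_alloc.h"
*/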
3232*b7893ccfSSadaf Ebrahimi
3233*b7893ccfSSadaf Ebrahimi #ifndef VMA_ALIGN_OF
3234*b7893ccfSSadaf Ebrahimi #define VMA_ALIGN_OF(type) (__alignof(type))
3235*b7893ccfSSadaf Ebrahimi #endif
3236*b7893ccfSSadaf Ebrahimi
3237*b7893ccfSSadaf Ebrahimi #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3238*b7893ccfSSadaf Ebrahimi #if defined(_WIN32)
3239*b7893ccfSSadaf Ebrahimi #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3240*b7893ccfSSadaf Ebrahimi #else
3241*b7893ccfSSadaf Ebrahimi #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3242*b7893ccfSSadaf Ebrahimi #endif
3243*b7893ccfSSadaf Ebrahimi #endif
3244*b7893ccfSSadaf Ebrahimi
3245*b7893ccfSSadaf Ebrahimi #ifndef VMA_SYSTEM_FREE
3246*b7893ccfSSadaf Ebrahimi #if defined(_WIN32)
3247*b7893ccfSSadaf Ebrahimi #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3248*b7893ccfSSadaf Ebrahimi #else
3249*b7893ccfSSadaf Ebrahimi #define VMA_SYSTEM_FREE(ptr) free(ptr)
3250*b7893ccfSSadaf Ebrahimi #endif
3251*b7893ccfSSadaf Ebrahimi #endif
3252*b7893ccfSSadaf Ebrahimi
3253*b7893ccfSSadaf Ebrahimi #ifndef VMA_MIN
3254*b7893ccfSSadaf Ebrahimi #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3255*b7893ccfSSadaf Ebrahimi #endif
3256*b7893ccfSSadaf Ebrahimi
3257*b7893ccfSSadaf Ebrahimi #ifndef VMA_MAX
3258*b7893ccfSSadaf Ebrahimi #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3259*b7893ccfSSadaf Ebrahimi #endif
3260*b7893ccfSSadaf Ebrahimi
3261*b7893ccfSSadaf Ebrahimi #ifndef VMA_SWAP
3262*b7893ccfSSadaf Ebrahimi #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3263*b7893ccfSSadaf Ebrahimi #endif
3264*b7893ccfSSadaf Ebrahimi
3265*b7893ccfSSadaf Ebrahimi #ifndef VMA_SORT
3266*b7893ccfSSadaf Ebrahimi #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3267*b7893ccfSSadaf Ebrahimi #endif
3268*b7893ccfSSadaf Ebrahimi
3269*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_LOG
3270*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_LOG(format, ...)
3271*b7893ccfSSadaf Ebrahimi /*
3272*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_LOG(format, ...) do { \
3273*b7893ccfSSadaf Ebrahimi printf(format, __VA_ARGS__); \
3274*b7893ccfSSadaf Ebrahimi printf("\n"); \
3275*b7893ccfSSadaf Ebrahimi } while(false)
3276*b7893ccfSSadaf Ebrahimi */
3277*b7893ccfSSadaf Ebrahimi #endif
3278*b7893ccfSSadaf Ebrahimi
3279*b7893ccfSSadaf Ebrahimi // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3280*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
3281*b7893ccfSSadaf Ebrahimi static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3282*b7893ccfSSadaf Ebrahimi {
3283*b7893ccfSSadaf Ebrahimi snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3284*b7893ccfSSadaf Ebrahimi }
3285*b7893ccfSSadaf Ebrahimi static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3286*b7893ccfSSadaf Ebrahimi {
3287*b7893ccfSSadaf Ebrahimi snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3288*b7893ccfSSadaf Ebrahimi }
3289*b7893ccfSSadaf Ebrahimi static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3290*b7893ccfSSadaf Ebrahimi {
3291*b7893ccfSSadaf Ebrahimi snprintf(outStr, strLen, "%p", ptr);
3292*b7893ccfSSadaf Ebrahimi }
3293*b7893ccfSSadaf Ebrahimi #endif
3294*b7893ccfSSadaf Ebrahimi
3295*b7893ccfSSadaf Ebrahimi #ifndef VMA_MUTEX
3296*b7893ccfSSadaf Ebrahimi class VmaMutex
3297*b7893ccfSSadaf Ebrahimi {
3298*b7893ccfSSadaf Ebrahimi public:
3299*b7893ccfSSadaf Ebrahimi void Lock() { m_Mutex.lock(); }
3300*b7893ccfSSadaf Ebrahimi void Unlock() { m_Mutex.unlock(); }
3301*b7893ccfSSadaf Ebrahimi private:
3302*b7893ccfSSadaf Ebrahimi std::mutex m_Mutex;
3303*b7893ccfSSadaf Ebrahimi };
3304*b7893ccfSSadaf Ebrahimi #define VMA_MUTEX VmaMutex
3305*b7893ccfSSadaf Ebrahimi #endif
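// Illustrative sketch (assumption, not part of the original file): any class that
// provides Lock()/Unlock() can be plugged in by defining VMA_MUTEX before inclusion.
/*
#include <mutex>

class MyRecursiveVmaMutex   // hypothetical replacement type
{
public:
    void Lock() { m_Mutex.lock(); }
    void Unlock() { m_Mutex.unlock(); }
private:
    std::recursive_mutex m_Mutex;
};
#define VMA_MUTEX MyRecursiveVmaMutex

#include "vk_mem_alloc.h"
*/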
3306*b7893ccfSSadaf Ebrahimi
3307*b7893ccfSSadaf Ebrahimi // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3308*b7893ccfSSadaf Ebrahimi #ifndef VMA_RW_MUTEX
3309*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_SHARED_MUTEX
3310*b7893ccfSSadaf Ebrahimi // Use std::shared_mutex from C++17.
3311*b7893ccfSSadaf Ebrahimi #include <shared_mutex>
3312*b7893ccfSSadaf Ebrahimi class VmaRWMutex
3313*b7893ccfSSadaf Ebrahimi {
3314*b7893ccfSSadaf Ebrahimi public:
3315*b7893ccfSSadaf Ebrahimi void LockRead() { m_Mutex.lock_shared(); }
3316*b7893ccfSSadaf Ebrahimi void UnlockRead() { m_Mutex.unlock_shared(); }
3317*b7893ccfSSadaf Ebrahimi void LockWrite() { m_Mutex.lock(); }
3318*b7893ccfSSadaf Ebrahimi void UnlockWrite() { m_Mutex.unlock(); }
3319*b7893ccfSSadaf Ebrahimi private:
3320*b7893ccfSSadaf Ebrahimi std::shared_mutex m_Mutex;
3321*b7893ccfSSadaf Ebrahimi };
3322*b7893ccfSSadaf Ebrahimi #define VMA_RW_MUTEX VmaRWMutex
3323*b7893ccfSSadaf Ebrahimi #elif defined(_WIN32)
3324*b7893ccfSSadaf Ebrahimi // Use SRWLOCK from WinAPI.
3325*b7893ccfSSadaf Ebrahimi class VmaRWMutex
3326*b7893ccfSSadaf Ebrahimi {
3327*b7893ccfSSadaf Ebrahimi public:
3328*b7893ccfSSadaf Ebrahimi VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3329*b7893ccfSSadaf Ebrahimi void LockRead() { AcquireSRWLockShared(&m_Lock); }
3330*b7893ccfSSadaf Ebrahimi void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3331*b7893ccfSSadaf Ebrahimi void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3332*b7893ccfSSadaf Ebrahimi void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3333*b7893ccfSSadaf Ebrahimi private:
3334*b7893ccfSSadaf Ebrahimi SRWLOCK m_Lock;
3335*b7893ccfSSadaf Ebrahimi };
3336*b7893ccfSSadaf Ebrahimi #define VMA_RW_MUTEX VmaRWMutex
3337*b7893ccfSSadaf Ebrahimi #else
3338*b7893ccfSSadaf Ebrahimi // Less efficient fallback: Use normal mutex.
3339*b7893ccfSSadaf Ebrahimi class VmaRWMutex
3340*b7893ccfSSadaf Ebrahimi {
3341*b7893ccfSSadaf Ebrahimi public:
3342*b7893ccfSSadaf Ebrahimi void LockRead() { m_Mutex.Lock(); }
3343*b7893ccfSSadaf Ebrahimi void UnlockRead() { m_Mutex.Unlock(); }
3344*b7893ccfSSadaf Ebrahimi void LockWrite() { m_Mutex.Lock(); }
3345*b7893ccfSSadaf Ebrahimi void UnlockWrite() { m_Mutex.Unlock(); }
3346*b7893ccfSSadaf Ebrahimi private:
3347*b7893ccfSSadaf Ebrahimi VMA_MUTEX m_Mutex;
3348*b7893ccfSSadaf Ebrahimi };
3349*b7893ccfSSadaf Ebrahimi #define VMA_RW_MUTEX VmaRWMutex
3350*b7893ccfSSadaf Ebrahimi #endif // #if VMA_USE_STL_SHARED_MUTEX
3351*b7893ccfSSadaf Ebrahimi #endif // #ifndef VMA_RW_MUTEX
3352*b7893ccfSSadaf Ebrahimi
3353*b7893ccfSSadaf Ebrahimi /*
3354*b7893ccfSSadaf Ebrahimi If providing your own implementation, you need to implement a subset of std::atomic:
3355*b7893ccfSSadaf Ebrahimi
3356*b7893ccfSSadaf Ebrahimi - Constructor(uint32_t desired)
3357*b7893ccfSSadaf Ebrahimi - uint32_t load() const
3358*b7893ccfSSadaf Ebrahimi - void store(uint32_t desired)
3359*b7893ccfSSadaf Ebrahimi - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3360*b7893ccfSSadaf Ebrahimi */
3361*b7893ccfSSadaf Ebrahimi #ifndef VMA_ATOMIC_UINT32
3362*b7893ccfSSadaf Ebrahimi #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3363*b7893ccfSSadaf Ebrahimi #endif
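// Illustrative sketch (assumption, not part of the original file): a custom type
// exposing exactly the subset listed above, defined before this header is included.
/*
#include <atomic>
#include <cstdint>

class MyVmaAtomicUint32   // hypothetical replacement type
{
public:
    MyVmaAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { return m_Value.load(); }
    void store(uint32_t desired) { m_Value.store(desired); }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        { return m_Value.compare_exchange_weak(expected, desired); }
private:
    std::atomic<uint32_t> m_Value;
};
#define VMA_ATOMIC_UINT32 MyVmaAtomicUint32
*/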
3364*b7893ccfSSadaf Ebrahimi
3365*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3366*b7893ccfSSadaf Ebrahimi /**
3367*b7893ccfSSadaf Ebrahimi Every allocation will have its own memory block.
3368*b7893ccfSSadaf Ebrahimi Define to 1 for debugging purposes only.
3369*b7893ccfSSadaf Ebrahimi */
3370*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3371*b7893ccfSSadaf Ebrahimi #endif
3372*b7893ccfSSadaf Ebrahimi
3373*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_ALIGNMENT
3374*b7893ccfSSadaf Ebrahimi /**
3375*b7893ccfSSadaf Ebrahimi Minimum alignment of all allocations, in bytes.
3376*b7893ccfSSadaf Ebrahimi Set to more than 1 for debugging purposes only. Must be power of two.
3377*b7893ccfSSadaf Ebrahimi */
3378*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_ALIGNMENT (1)
3379*b7893ccfSSadaf Ebrahimi #endif
3380*b7893ccfSSadaf Ebrahimi
3381*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_MARGIN
3382*b7893ccfSSadaf Ebrahimi /**
3383*b7893ccfSSadaf Ebrahimi Minimum margin before and after every allocation, in bytes.
3384*b7893ccfSSadaf Ebrahimi Set nonzero for debugging purposes only.
3385*b7893ccfSSadaf Ebrahimi */
3386*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_MARGIN (0)
3387*b7893ccfSSadaf Ebrahimi #endif
3388*b7893ccfSSadaf Ebrahimi
3389*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3390*b7893ccfSSadaf Ebrahimi /**
3391*b7893ccfSSadaf Ebrahimi Define this macro to 1 to automatically fill new allocations and destroyed
3392*b7893ccfSSadaf Ebrahimi allocations with some bit pattern.
3393*b7893ccfSSadaf Ebrahimi */
3394*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3395*b7893ccfSSadaf Ebrahimi #endif
3396*b7893ccfSSadaf Ebrahimi
3397*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_DETECT_CORRUPTION
3398*b7893ccfSSadaf Ebrahimi /**
3399*b7893ccfSSadaf Ebrahimi Define this macro to 1, together with a non-zero value of VMA_DEBUG_MARGIN, to
3400*b7893ccfSSadaf Ebrahimi enable writing a magic value to the margin before and after every allocation and
3401*b7893ccfSSadaf Ebrahimi validating it, so that memory corruption (out-of-bounds writes) is detected.
3402*b7893ccfSSadaf Ebrahimi */
3403*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_DETECT_CORRUPTION (0)
3404*b7893ccfSSadaf Ebrahimi #endif
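// Illustrative sketch (assumption about typical usage, not part of the original file):
// enabling margins together with corruption detection before including this header.
/*
#define VMA_DEBUG_MARGIN 16            // non-zero margin around every allocation, in bytes
#define VMA_DEBUG_DETECT_CORRUPTION 1  // write and validate magic values inside the margins
#include "vk_mem_alloc.h"
*/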
3405*b7893ccfSSadaf Ebrahimi
3406*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_GLOBAL_MUTEX
3407*b7893ccfSSadaf Ebrahimi /**
3408*b7893ccfSSadaf Ebrahimi Set this to 1 for debugging purposes only, to enable a single mutex protecting all
3409*b7893ccfSSadaf Ebrahimi entry calls to the library. Can be useful for debugging multithreading issues.
3410*b7893ccfSSadaf Ebrahimi */
3411*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_GLOBAL_MUTEX (0)
3412*b7893ccfSSadaf Ebrahimi #endif
3413*b7893ccfSSadaf Ebrahimi
3414*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3415*b7893ccfSSadaf Ebrahimi /**
3416*b7893ccfSSadaf Ebrahimi Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
3417*b7893ccfSSadaf Ebrahimi Set to more than 1 for debugging purposes only. Must be power of two.
3418*b7893ccfSSadaf Ebrahimi */
3419*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3420*b7893ccfSSadaf Ebrahimi #endif
3421*b7893ccfSSadaf Ebrahimi
3422*b7893ccfSSadaf Ebrahimi #ifndef VMA_SMALL_HEAP_MAX_SIZE
3423*b7893ccfSSadaf Ebrahimi /// Maximum size of a memory heap in Vulkan to consider it "small".
3424*b7893ccfSSadaf Ebrahimi #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3425*b7893ccfSSadaf Ebrahimi #endif
3426*b7893ccfSSadaf Ebrahimi
3427*b7893ccfSSadaf Ebrahimi #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3428*b7893ccfSSadaf Ebrahimi /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
3429*b7893ccfSSadaf Ebrahimi #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3430*b7893ccfSSadaf Ebrahimi #endif
3431*b7893ccfSSadaf Ebrahimi
3432*b7893ccfSSadaf Ebrahimi #ifndef VMA_CLASS_NO_COPY
3433*b7893ccfSSadaf Ebrahimi #define VMA_CLASS_NO_COPY(className) \
3434*b7893ccfSSadaf Ebrahimi private: \
3435*b7893ccfSSadaf Ebrahimi className(const className&) = delete; \
3436*b7893ccfSSadaf Ebrahimi className& operator=(const className&) = delete;
3437*b7893ccfSSadaf Ebrahimi #endif
3438*b7893ccfSSadaf Ebrahimi
3439*b7893ccfSSadaf Ebrahimi static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3440*b7893ccfSSadaf Ebrahimi
3441*b7893ccfSSadaf Ebrahimi // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3442*b7893ccfSSadaf Ebrahimi static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3443*b7893ccfSSadaf Ebrahimi
3444*b7893ccfSSadaf Ebrahimi static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3445*b7893ccfSSadaf Ebrahimi static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3446*b7893ccfSSadaf Ebrahimi
3447*b7893ccfSSadaf Ebrahimi /*******************************************************************************
3448*b7893ccfSSadaf Ebrahimi END OF CONFIGURATION
3449*b7893ccfSSadaf Ebrahimi */
3450*b7893ccfSSadaf Ebrahimi
3451*b7893ccfSSadaf Ebrahimi #if defined(__GNUC__)
3452*b7893ccfSSadaf Ebrahimi #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
3453*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic push
3454*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic ignored "-Wtype-limits"
3455*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic ignored "-Wunused-variable"
3456*b7893ccfSSadaf Ebrahimi #if defined(__clang__)
3457*b7893ccfSSadaf Ebrahimi #pragma clang diagnostic push
3458*b7893ccfSSadaf Ebrahimi #pragma clang diagnostic ignored "-Wtautological-compare"
3459*b7893ccfSSadaf Ebrahimi #endif
3460*b7893ccfSSadaf Ebrahimi #if GCC_VERSION >= 80000
3461*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic ignored "-Wclass-memaccess"
3462*b7893ccfSSadaf Ebrahimi #endif
3463*b7893ccfSSadaf Ebrahimi #if defined(ANDROID)
3464*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic ignored "-Wunused-private-field"
3465*b7893ccfSSadaf Ebrahimi #endif
3466*b7893ccfSSadaf Ebrahimi #endif
3467*b7893ccfSSadaf Ebrahimi static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3468*b7893ccfSSadaf Ebrahimi
3469*b7893ccfSSadaf Ebrahimi static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3470*b7893ccfSSadaf Ebrahimi VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3471*b7893ccfSSadaf Ebrahimi
3472*b7893ccfSSadaf Ebrahimi // Returns number of bits set to 1 in (v).
3473*b7893ccfSSadaf Ebrahimi static inline uint32_t VmaCountBitsSet(uint32_t v)
3474*b7893ccfSSadaf Ebrahimi {
3475*b7893ccfSSadaf Ebrahimi uint32_t c = v - ((v >> 1) & 0x55555555);
3476*b7893ccfSSadaf Ebrahimi c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3477*b7893ccfSSadaf Ebrahimi c = ((c >> 4) + c) & 0x0F0F0F0F;
3478*b7893ccfSSadaf Ebrahimi c = ((c >> 8) + c) & 0x00FF00FF;
3479*b7893ccfSSadaf Ebrahimi c = ((c >> 16) + c) & 0x0000FFFF;
3480*b7893ccfSSadaf Ebrahimi return c;
3481*b7893ccfSSadaf Ebrahimi }
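// Example (sketch): counting candidate memory types in a VkMemoryRequirements::memoryTypeBits mask.
/*
const uint32_t memoryTypeBits = 0x2Du;                           // binary 101101
const uint32_t candidateTypes = VmaCountBitsSet(memoryTypeBits); // == 4
*/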
3482*b7893ccfSSadaf Ebrahimi
3483*b7893ccfSSadaf Ebrahimi // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3484*b7893ccfSSadaf Ebrahimi // Use types like uint32_t, uint64_t as T.
3485*b7893ccfSSadaf Ebrahimi template <typename T>
3486*b7893ccfSSadaf Ebrahimi static inline T VmaAlignUp(T val, T align)
3487*b7893ccfSSadaf Ebrahimi {
3488*b7893ccfSSadaf Ebrahimi return (val + align - 1) / align * align;
3489*b7893ccfSSadaf Ebrahimi }
3490*b7893ccfSSadaf Ebrahimi // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3491*b7893ccfSSadaf Ebrahimi // Use types like uint32_t, uint64_t as T.
3492*b7893ccfSSadaf Ebrahimi template <typename T>
3493*b7893ccfSSadaf Ebrahimi static inline T VmaAlignDown(T val, T align)
3494*b7893ccfSSadaf Ebrahimi {
3495*b7893ccfSSadaf Ebrahimi return val / align * align;
3496*b7893ccfSSadaf Ebrahimi }
3497*b7893ccfSSadaf Ebrahimi
3498*b7893ccfSSadaf Ebrahimi // Division with mathematical rounding to nearest number.
3499*b7893ccfSSadaf Ebrahimi template <typename T>
3500*b7893ccfSSadaf Ebrahimi static inline T VmaRoundDiv(T x, T y)
3501*b7893ccfSSadaf Ebrahimi {
3502*b7893ccfSSadaf Ebrahimi return (x + (y / (T)2)) / y;
3503*b7893ccfSSadaf Ebrahimi }
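// Example (sketch) of the helpers above:
/*
const VkDeviceSize up   = VmaAlignUp<VkDeviceSize>(11, 8);   // == 16
const VkDeviceSize down = VmaAlignDown<VkDeviceSize>(11, 8); // == 8
const VkDeviceSize div  = VmaRoundDiv<VkDeviceSize>(7, 2);   // == 4, i.e. 7/2 rounded to nearest
*/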
3504*b7893ccfSSadaf Ebrahimi
3505*b7893ccfSSadaf Ebrahimi /*
3506*b7893ccfSSadaf Ebrahimi Returns true if the given number is a power of two.
3507*b7893ccfSSadaf Ebrahimi T must be an unsigned integer, or a signed integer that is always nonnegative.
3508*b7893ccfSSadaf Ebrahimi Returns true for 0.
3509*b7893ccfSSadaf Ebrahimi */
3510*b7893ccfSSadaf Ebrahimi template <typename T>
3511*b7893ccfSSadaf Ebrahimi inline bool VmaIsPow2(T x)
3512*b7893ccfSSadaf Ebrahimi {
3513*b7893ccfSSadaf Ebrahimi return (x & (x-1)) == 0;
3514*b7893ccfSSadaf Ebrahimi }
3515*b7893ccfSSadaf Ebrahimi
3516*b7893ccfSSadaf Ebrahimi // Returns the smallest power of 2 greater than or equal to v.
3517*b7893ccfSSadaf Ebrahimi static inline uint32_t VmaNextPow2(uint32_t v)
3518*b7893ccfSSadaf Ebrahimi {
3519*b7893ccfSSadaf Ebrahimi v--;
3520*b7893ccfSSadaf Ebrahimi v |= v >> 1;
3521*b7893ccfSSadaf Ebrahimi v |= v >> 2;
3522*b7893ccfSSadaf Ebrahimi v |= v >> 4;
3523*b7893ccfSSadaf Ebrahimi v |= v >> 8;
3524*b7893ccfSSadaf Ebrahimi v |= v >> 16;
3525*b7893ccfSSadaf Ebrahimi v++;
3526*b7893ccfSSadaf Ebrahimi return v;
3527*b7893ccfSSadaf Ebrahimi }
3528*b7893ccfSSadaf Ebrahimi static inline uint64_t VmaNextPow2(uint64_t v)
3529*b7893ccfSSadaf Ebrahimi {
3530*b7893ccfSSadaf Ebrahimi v--;
3531*b7893ccfSSadaf Ebrahimi v |= v >> 1;
3532*b7893ccfSSadaf Ebrahimi v |= v >> 2;
3533*b7893ccfSSadaf Ebrahimi v |= v >> 4;
3534*b7893ccfSSadaf Ebrahimi v |= v >> 8;
3535*b7893ccfSSadaf Ebrahimi v |= v >> 16;
3536*b7893ccfSSadaf Ebrahimi v |= v >> 32;
3537*b7893ccfSSadaf Ebrahimi v++;
3538*b7893ccfSSadaf Ebrahimi return v;
3539*b7893ccfSSadaf Ebrahimi }
3540*b7893ccfSSadaf Ebrahimi
3541*b7893ccfSSadaf Ebrahimi // Returns the largest power of 2 less than or equal to v.
3542*b7893ccfSSadaf Ebrahimi static inline uint32_t VmaPrevPow2(uint32_t v)
3543*b7893ccfSSadaf Ebrahimi {
3544*b7893ccfSSadaf Ebrahimi v |= v >> 1;
3545*b7893ccfSSadaf Ebrahimi v |= v >> 2;
3546*b7893ccfSSadaf Ebrahimi v |= v >> 4;
3547*b7893ccfSSadaf Ebrahimi v |= v >> 8;
3548*b7893ccfSSadaf Ebrahimi v |= v >> 16;
3549*b7893ccfSSadaf Ebrahimi v = v ^ (v >> 1);
3550*b7893ccfSSadaf Ebrahimi return v;
3551*b7893ccfSSadaf Ebrahimi }
3552*b7893ccfSSadaf Ebrahimi static inline uint64_t VmaPrevPow2(uint64_t v)
3553*b7893ccfSSadaf Ebrahimi {
3554*b7893ccfSSadaf Ebrahimi v |= v >> 1;
3555*b7893ccfSSadaf Ebrahimi v |= v >> 2;
3556*b7893ccfSSadaf Ebrahimi v |= v >> 4;
3557*b7893ccfSSadaf Ebrahimi v |= v >> 8;
3558*b7893ccfSSadaf Ebrahimi v |= v >> 16;
3559*b7893ccfSSadaf Ebrahimi v |= v >> 32;
3560*b7893ccfSSadaf Ebrahimi v = v ^ (v >> 1);
3561*b7893ccfSSadaf Ebrahimi return v;
3562*b7893ccfSSadaf Ebrahimi }
3563*b7893ccfSSadaf Ebrahimi
3564*b7893ccfSSadaf Ebrahimi static inline bool VmaStrIsEmpty(const char* pStr)
3565*b7893ccfSSadaf Ebrahimi {
3566*b7893ccfSSadaf Ebrahimi return pStr == VMA_NULL || *pStr == '\0';
3567*b7893ccfSSadaf Ebrahimi }
3568*b7893ccfSSadaf Ebrahimi
3569*b7893ccfSSadaf Ebrahimi static const char* VmaAlgorithmToStr(uint32_t algorithm)
3570*b7893ccfSSadaf Ebrahimi {
3571*b7893ccfSSadaf Ebrahimi switch(algorithm)
3572*b7893ccfSSadaf Ebrahimi {
3573*b7893ccfSSadaf Ebrahimi case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3574*b7893ccfSSadaf Ebrahimi return "Linear";
3575*b7893ccfSSadaf Ebrahimi case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3576*b7893ccfSSadaf Ebrahimi return "Buddy";
3577*b7893ccfSSadaf Ebrahimi case 0:
3578*b7893ccfSSadaf Ebrahimi return "Default";
3579*b7893ccfSSadaf Ebrahimi default:
3580*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
3581*b7893ccfSSadaf Ebrahimi return "";
3582*b7893ccfSSadaf Ebrahimi }
3583*b7893ccfSSadaf Ebrahimi }
3584*b7893ccfSSadaf Ebrahimi
3585*b7893ccfSSadaf Ebrahimi #ifndef VMA_SORT
3586*b7893ccfSSadaf Ebrahimi
3587*b7893ccfSSadaf Ebrahimi template<typename Iterator, typename Compare>
3588*b7893ccfSSadaf Ebrahimi Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3589*b7893ccfSSadaf Ebrahimi {
3590*b7893ccfSSadaf Ebrahimi Iterator centerValue = end; --centerValue;
3591*b7893ccfSSadaf Ebrahimi Iterator insertIndex = beg;
3592*b7893ccfSSadaf Ebrahimi for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3593*b7893ccfSSadaf Ebrahimi {
3594*b7893ccfSSadaf Ebrahimi if(cmp(*memTypeIndex, *centerValue))
3595*b7893ccfSSadaf Ebrahimi {
3596*b7893ccfSSadaf Ebrahimi if(insertIndex != memTypeIndex)
3597*b7893ccfSSadaf Ebrahimi {
3598*b7893ccfSSadaf Ebrahimi VMA_SWAP(*memTypeIndex, *insertIndex);
3599*b7893ccfSSadaf Ebrahimi }
3600*b7893ccfSSadaf Ebrahimi ++insertIndex;
3601*b7893ccfSSadaf Ebrahimi }
3602*b7893ccfSSadaf Ebrahimi }
3603*b7893ccfSSadaf Ebrahimi if(insertIndex != centerValue)
3604*b7893ccfSSadaf Ebrahimi {
3605*b7893ccfSSadaf Ebrahimi VMA_SWAP(*insertIndex, *centerValue);
3606*b7893ccfSSadaf Ebrahimi }
3607*b7893ccfSSadaf Ebrahimi return insertIndex;
3608*b7893ccfSSadaf Ebrahimi }
3609*b7893ccfSSadaf Ebrahimi
3610*b7893ccfSSadaf Ebrahimi template<typename Iterator, typename Compare>
3611*b7893ccfSSadaf Ebrahimi void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3612*b7893ccfSSadaf Ebrahimi {
3613*b7893ccfSSadaf Ebrahimi if(beg < end)
3614*b7893ccfSSadaf Ebrahimi {
3615*b7893ccfSSadaf Ebrahimi Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3616*b7893ccfSSadaf Ebrahimi VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3617*b7893ccfSSadaf Ebrahimi VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3618*b7893ccfSSadaf Ebrahimi }
3619*b7893ccfSSadaf Ebrahimi }
3620*b7893ccfSSadaf Ebrahimi
3621*b7893ccfSSadaf Ebrahimi #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3622*b7893ccfSSadaf Ebrahimi
3623*b7893ccfSSadaf Ebrahimi #endif // #ifndef VMA_SORT
3624*b7893ccfSSadaf Ebrahimi
3625*b7893ccfSSadaf Ebrahimi /*
3626*b7893ccfSSadaf Ebrahimi Returns true if two memory blocks occupy overlapping pages.
3627*b7893ccfSSadaf Ebrahimi ResourceA must be at a lower memory offset than ResourceB.
3628*b7893ccfSSadaf Ebrahimi
3629*b7893ccfSSadaf Ebrahimi Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3630*b7893ccfSSadaf Ebrahimi chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3631*b7893ccfSSadaf Ebrahimi */
3632*b7893ccfSSadaf Ebrahimi static inline bool VmaBlocksOnSamePage(
3633*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceAOffset,
3634*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceASize,
3635*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceBOffset,
3636*b7893ccfSSadaf Ebrahimi VkDeviceSize pageSize)
3637*b7893ccfSSadaf Ebrahimi {
3638*b7893ccfSSadaf Ebrahimi VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3639*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3640*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3641*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceBStart = resourceBOffset;
3642*b7893ccfSSadaf Ebrahimi VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3643*b7893ccfSSadaf Ebrahimi return resourceAEndPage == resourceBStartPage;
3644*b7893ccfSSadaf Ebrahimi }
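// Worked example (sketch), with pageSize == bufferImageGranularity == 1024:
/*
bool mayConflict    = VmaBlocksOnSamePage(0, 512,  800, 1024); // true:  A ends and B starts on page 0
bool cannotConflict = VmaBlocksOnSamePage(0, 512, 1024, 1024); // false: B starts on the next page
*/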
3645*b7893ccfSSadaf Ebrahimi
3646*b7893ccfSSadaf Ebrahimi enum VmaSuballocationType
3647*b7893ccfSSadaf Ebrahimi {
3648*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_FREE = 0,
3649*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3650*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3651*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3652*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3653*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3654*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3655*b7893ccfSSadaf Ebrahimi };
3656*b7893ccfSSadaf Ebrahimi
3657*b7893ccfSSadaf Ebrahimi /*
3658*b7893ccfSSadaf Ebrahimi Returns true if the given suballocation types could conflict and must respect
3659*b7893ccfSSadaf Ebrahimi VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3660*b7893ccfSSadaf Ebrahimi or linear image and the other is an optimal image. If the type is unknown, behave
3661*b7893ccfSSadaf Ebrahimi conservatively.
3662*b7893ccfSSadaf Ebrahimi */
3663*b7893ccfSSadaf Ebrahimi static inline bool VmaIsBufferImageGranularityConflict(
3664*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType1,
3665*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType2)
3666*b7893ccfSSadaf Ebrahimi {
3667*b7893ccfSSadaf Ebrahimi if(suballocType1 > suballocType2)
3668*b7893ccfSSadaf Ebrahimi {
3669*b7893ccfSSadaf Ebrahimi VMA_SWAP(suballocType1, suballocType2);
3670*b7893ccfSSadaf Ebrahimi }
3671*b7893ccfSSadaf Ebrahimi
3672*b7893ccfSSadaf Ebrahimi switch(suballocType1)
3673*b7893ccfSSadaf Ebrahimi {
3674*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_FREE:
3675*b7893ccfSSadaf Ebrahimi return false;
3676*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3677*b7893ccfSSadaf Ebrahimi return true;
3678*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_BUFFER:
3679*b7893ccfSSadaf Ebrahimi return
3680*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3681*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3682*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3683*b7893ccfSSadaf Ebrahimi return
3684*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3685*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3686*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3687*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3688*b7893ccfSSadaf Ebrahimi return
3689*b7893ccfSSadaf Ebrahimi suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3690*b7893ccfSSadaf Ebrahimi case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3691*b7893ccfSSadaf Ebrahimi return false;
3692*b7893ccfSSadaf Ebrahimi default:
3693*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
3694*b7893ccfSSadaf Ebrahimi return true;
3695*b7893ccfSSadaf Ebrahimi }
3696*b7893ccfSSadaf Ebrahimi }
3697*b7893ccfSSadaf Ebrahimi
3698*b7893ccfSSadaf Ebrahimi static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3699*b7893ccfSSadaf Ebrahimi {
3700*b7893ccfSSadaf Ebrahimi #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3701*b7893ccfSSadaf Ebrahimi uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3702*b7893ccfSSadaf Ebrahimi const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3703*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < numberCount; ++i, ++pDst)
3704*b7893ccfSSadaf Ebrahimi {
3705*b7893ccfSSadaf Ebrahimi *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3706*b7893ccfSSadaf Ebrahimi }
3707*b7893ccfSSadaf Ebrahimi #else
3708*b7893ccfSSadaf Ebrahimi // no-op
3709*b7893ccfSSadaf Ebrahimi #endif
3710*b7893ccfSSadaf Ebrahimi }
3711*b7893ccfSSadaf Ebrahimi
3712*b7893ccfSSadaf Ebrahimi static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3713*b7893ccfSSadaf Ebrahimi {
3714*b7893ccfSSadaf Ebrahimi #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3715*b7893ccfSSadaf Ebrahimi const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3716*b7893ccfSSadaf Ebrahimi const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3717*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3718*b7893ccfSSadaf Ebrahimi {
3719*b7893ccfSSadaf Ebrahimi if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3720*b7893ccfSSadaf Ebrahimi {
3721*b7893ccfSSadaf Ebrahimi return false;
3722*b7893ccfSSadaf Ebrahimi }
3723*b7893ccfSSadaf Ebrahimi }
3724*b7893ccfSSadaf Ebrahimi #endif
3725*b7893ccfSSadaf Ebrahimi return true;
3726*b7893ccfSSadaf Ebrahimi }
3727*b7893ccfSSadaf Ebrahimi
3728*b7893ccfSSadaf Ebrahimi // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3729*b7893ccfSSadaf Ebrahimi struct VmaMutexLock
3730*b7893ccfSSadaf Ebrahimi {
3731*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaMutexLock)
3732*b7893ccfSSadaf Ebrahimi public:
3733*b7893ccfSSadaf Ebrahimi VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3734*b7893ccfSSadaf Ebrahimi m_pMutex(useMutex ? &mutex : VMA_NULL)
3735*b7893ccfSSadaf Ebrahimi { if(m_pMutex) { m_pMutex->Lock(); } }
3736*b7893ccfSSadaf Ebrahimi ~VmaMutexLock()
3737*b7893ccfSSadaf Ebrahimi { if(m_pMutex) { m_pMutex->Unlock(); } }
3738*b7893ccfSSadaf Ebrahimi private:
3739*b7893ccfSSadaf Ebrahimi VMA_MUTEX* m_pMutex;
3740*b7893ccfSSadaf Ebrahimi };
3741*b7893ccfSSadaf Ebrahimi
3742*b7893ccfSSadaf Ebrahimi // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3743*b7893ccfSSadaf Ebrahimi struct VmaMutexLockRead
3744*b7893ccfSSadaf Ebrahimi {
3745*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaMutexLockRead)
3746*b7893ccfSSadaf Ebrahimi public:
3747*b7893ccfSSadaf Ebrahimi VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3748*b7893ccfSSadaf Ebrahimi m_pMutex(useMutex ? &mutex : VMA_NULL)
3749*b7893ccfSSadaf Ebrahimi { if(m_pMutex) { m_pMutex->LockRead(); } }
3750*b7893ccfSSadaf Ebrahimi ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3751*b7893ccfSSadaf Ebrahimi private:
3752*b7893ccfSSadaf Ebrahimi VMA_RW_MUTEX* m_pMutex;
3753*b7893ccfSSadaf Ebrahimi };
3754*b7893ccfSSadaf Ebrahimi
3755*b7893ccfSSadaf Ebrahimi // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3756*b7893ccfSSadaf Ebrahimi struct VmaMutexLockWrite
3757*b7893ccfSSadaf Ebrahimi {
3758*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3759*b7893ccfSSadaf Ebrahimi public:
3760*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3761*b7893ccfSSadaf Ebrahimi m_pMutex(useMutex ? &mutex : VMA_NULL)
3762*b7893ccfSSadaf Ebrahimi { if(m_pMutex) { m_pMutex->LockWrite(); } }
3763*b7893ccfSSadaf Ebrahimi ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3764*b7893ccfSSadaf Ebrahimi private:
3765*b7893ccfSSadaf Ebrahimi VMA_RW_MUTEX* m_pMutex;
3766*b7893ccfSSadaf Ebrahimi };
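// Usage sketch (assumption, not part of the original file). The bool flag mirrors the
// library's "use internal synchronization" decision, e.g. locking is skipped when the
// allocator is created with VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT.
/*
VMA_RW_MUTEX poolsMutex;     // hypothetical shared state guarded by the RW mutex
const bool useMutex = true;

{
    VmaMutexLockRead lock(poolsMutex, useMutex);   // shared lock held until end of scope
    // ... read shared state ...
}
{
    VmaMutexLockWrite lock(poolsMutex, useMutex);  // exclusive lock held until end of scope
    // ... modify shared state ...
}
*/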
3767*b7893ccfSSadaf Ebrahimi
3768*b7893ccfSSadaf Ebrahimi #if VMA_DEBUG_GLOBAL_MUTEX
3769*b7893ccfSSadaf Ebrahimi static VMA_MUTEX gDebugGlobalMutex;
3770*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3771*b7893ccfSSadaf Ebrahimi #else
3772*b7893ccfSSadaf Ebrahimi #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3773*b7893ccfSSadaf Ebrahimi #endif
3774*b7893ccfSSadaf Ebrahimi
3775*b7893ccfSSadaf Ebrahimi // Minimum size of a free suballocation to register it in the free suballocation collection.
3776*b7893ccfSSadaf Ebrahimi static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3777*b7893ccfSSadaf Ebrahimi
3778*b7893ccfSSadaf Ebrahimi /*
3779*b7893ccfSSadaf Ebrahimi Performs binary search and returns an iterator to the first element that is greater
3780*b7893ccfSSadaf Ebrahimi than or equal to (key), according to comparison (cmp).
3781*b7893ccfSSadaf Ebrahimi
3782*b7893ccfSSadaf Ebrahimi Cmp should return true if its first argument is less than its second argument.
3783*b7893ccfSSadaf Ebrahimi
3784*b7893ccfSSadaf Ebrahimi The returned value is the found element, if present in the collection, or the place
3785*b7893ccfSSadaf Ebrahimi where a new element with value (key) should be inserted.
3786*b7893ccfSSadaf Ebrahimi */
3787*b7893ccfSSadaf Ebrahimi template <typename CmpLess, typename IterT, typename KeyT>
3788*b7893ccfSSadaf Ebrahimi static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3789*b7893ccfSSadaf Ebrahimi {
3790*b7893ccfSSadaf Ebrahimi size_t down = 0, up = (end - beg);
3791*b7893ccfSSadaf Ebrahimi while(down < up)
3792*b7893ccfSSadaf Ebrahimi {
3793*b7893ccfSSadaf Ebrahimi const size_t mid = (down + up) / 2;
3794*b7893ccfSSadaf Ebrahimi if(cmp(*(beg+mid), key))
3795*b7893ccfSSadaf Ebrahimi {
3796*b7893ccfSSadaf Ebrahimi down = mid + 1;
3797*b7893ccfSSadaf Ebrahimi }
3798*b7893ccfSSadaf Ebrahimi else
3799*b7893ccfSSadaf Ebrahimi {
3800*b7893ccfSSadaf Ebrahimi up = mid;
3801*b7893ccfSSadaf Ebrahimi }
3802*b7893ccfSSadaf Ebrahimi }
3803*b7893ccfSSadaf Ebrahimi return beg + down;
3804*b7893ccfSSadaf Ebrahimi }
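// Usage sketch (hypothetical names): the comparator only has to model "less than".
/*
struct CmpLessU32 { bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; } };

const uint32_t sorted[] = { 1, 3, 3, 7, 9 };
const uint32_t* it = VmaBinaryFindFirstNotLess(sorted, sorted + 5, 4u, CmpLessU32());
// it points at 7 (index 3), the first element not less than 4.
*/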
3805*b7893ccfSSadaf Ebrahimi
3806*b7893ccfSSadaf Ebrahimi /*
3807*b7893ccfSSadaf Ebrahimi Returns true if all pointers in the array are non-null and unique.
3808*b7893ccfSSadaf Ebrahimi Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3809*b7893ccfSSadaf Ebrahimi T must be a pointer type, e.g. VmaAllocation, VmaPool.
3810*b7893ccfSSadaf Ebrahimi */
3811*b7893ccfSSadaf Ebrahimi template<typename T>
3812*b7893ccfSSadaf Ebrahimi static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3813*b7893ccfSSadaf Ebrahimi {
3814*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < count; ++i)
3815*b7893ccfSSadaf Ebrahimi {
3816*b7893ccfSSadaf Ebrahimi const T iPtr = arr[i];
3817*b7893ccfSSadaf Ebrahimi if(iPtr == VMA_NULL)
3818*b7893ccfSSadaf Ebrahimi {
3819*b7893ccfSSadaf Ebrahimi return false;
3820*b7893ccfSSadaf Ebrahimi }
3821*b7893ccfSSadaf Ebrahimi for(uint32_t j = i + 1; j < count; ++j)
3822*b7893ccfSSadaf Ebrahimi {
3823*b7893ccfSSadaf Ebrahimi if(iPtr == arr[j])
3824*b7893ccfSSadaf Ebrahimi {
3825*b7893ccfSSadaf Ebrahimi return false;
3826*b7893ccfSSadaf Ebrahimi }
3827*b7893ccfSSadaf Ebrahimi }
3828*b7893ccfSSadaf Ebrahimi }
3829*b7893ccfSSadaf Ebrahimi return true;
3830*b7893ccfSSadaf Ebrahimi }
3831*b7893ccfSSadaf Ebrahimi
3832*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
3833*b7893ccfSSadaf Ebrahimi // Memory allocation
3834*b7893ccfSSadaf Ebrahimi
3835*b7893ccfSSadaf Ebrahimi static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3836*b7893ccfSSadaf Ebrahimi {
3837*b7893ccfSSadaf Ebrahimi if((pAllocationCallbacks != VMA_NULL) &&
3838*b7893ccfSSadaf Ebrahimi (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3839*b7893ccfSSadaf Ebrahimi {
3840*b7893ccfSSadaf Ebrahimi return (*pAllocationCallbacks->pfnAllocation)(
3841*b7893ccfSSadaf Ebrahimi pAllocationCallbacks->pUserData,
3842*b7893ccfSSadaf Ebrahimi size,
3843*b7893ccfSSadaf Ebrahimi alignment,
3844*b7893ccfSSadaf Ebrahimi VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3845*b7893ccfSSadaf Ebrahimi }
3846*b7893ccfSSadaf Ebrahimi else
3847*b7893ccfSSadaf Ebrahimi {
3848*b7893ccfSSadaf Ebrahimi return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3849*b7893ccfSSadaf Ebrahimi }
3850*b7893ccfSSadaf Ebrahimi }
3851*b7893ccfSSadaf Ebrahimi
3852*b7893ccfSSadaf Ebrahimi static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3853*b7893ccfSSadaf Ebrahimi {
3854*b7893ccfSSadaf Ebrahimi if((pAllocationCallbacks != VMA_NULL) &&
3855*b7893ccfSSadaf Ebrahimi (pAllocationCallbacks->pfnFree != VMA_NULL))
3856*b7893ccfSSadaf Ebrahimi {
3857*b7893ccfSSadaf Ebrahimi (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3858*b7893ccfSSadaf Ebrahimi }
3859*b7893ccfSSadaf Ebrahimi else
3860*b7893ccfSSadaf Ebrahimi {
3861*b7893ccfSSadaf Ebrahimi VMA_SYSTEM_FREE(ptr);
3862*b7893ccfSSadaf Ebrahimi }
3863*b7893ccfSSadaf Ebrahimi }
3864*b7893ccfSSadaf Ebrahimi
3865*b7893ccfSSadaf Ebrahimi template<typename T>
3866*b7893ccfSSadaf Ebrahimi static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3867*b7893ccfSSadaf Ebrahimi {
3868*b7893ccfSSadaf Ebrahimi return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3869*b7893ccfSSadaf Ebrahimi }
3870*b7893ccfSSadaf Ebrahimi
3871*b7893ccfSSadaf Ebrahimi template<typename T>
3872*b7893ccfSSadaf Ebrahimi static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3873*b7893ccfSSadaf Ebrahimi {
3874*b7893ccfSSadaf Ebrahimi return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3875*b7893ccfSSadaf Ebrahimi }
3876*b7893ccfSSadaf Ebrahimi
3877*b7893ccfSSadaf Ebrahimi #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3878*b7893ccfSSadaf Ebrahimi
3879*b7893ccfSSadaf Ebrahimi #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3880*b7893ccfSSadaf Ebrahimi
3881*b7893ccfSSadaf Ebrahimi template<typename T>
3882*b7893ccfSSadaf Ebrahimi static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3883*b7893ccfSSadaf Ebrahimi {
3884*b7893ccfSSadaf Ebrahimi ptr->~T();
3885*b7893ccfSSadaf Ebrahimi VmaFree(pAllocationCallbacks, ptr);
3886*b7893ccfSSadaf Ebrahimi }
3887*b7893ccfSSadaf Ebrahimi
3888*b7893ccfSSadaf Ebrahimi template<typename T>
3889*b7893ccfSSadaf Ebrahimi static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3890*b7893ccfSSadaf Ebrahimi {
3891*b7893ccfSSadaf Ebrahimi if(ptr != VMA_NULL)
3892*b7893ccfSSadaf Ebrahimi {
3893*b7893ccfSSadaf Ebrahimi for(size_t i = count; i--; )
3894*b7893ccfSSadaf Ebrahimi {
3895*b7893ccfSSadaf Ebrahimi ptr[i].~T();
3896*b7893ccfSSadaf Ebrahimi }
3897*b7893ccfSSadaf Ebrahimi VmaFree(pAllocationCallbacks, ptr);
3898*b7893ccfSSadaf Ebrahimi }
3899*b7893ccfSSadaf Ebrahimi }
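// Usage sketch (hypothetical type): vma_new/vma_delete pair allocation through the
// VkAllocationCallbacks (or the system allocator when the callbacks are null) with
// explicit construction and destruction.
/*
struct MyItem { int a = 0; float b = 0.f; };

const VkAllocationCallbacks* allocs = VMA_NULL;  // null -> VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE
MyItem* item = vma_new(allocs, MyItem);          // placement-new into VmaAllocate<MyItem>
vma_delete(allocs, item);                        // calls ~MyItem() and then VmaFree
*/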
3900*b7893ccfSSadaf Ebrahimi
3901*b7893ccfSSadaf Ebrahimi // STL-compatible allocator.
3902*b7893ccfSSadaf Ebrahimi template<typename T>
3903*b7893ccfSSadaf Ebrahimi class VmaStlAllocator
3904*b7893ccfSSadaf Ebrahimi {
3905*b7893ccfSSadaf Ebrahimi public:
3906*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* const m_pCallbacks;
3907*b7893ccfSSadaf Ebrahimi typedef T value_type;
3908*b7893ccfSSadaf Ebrahimi
3909*b7893ccfSSadaf Ebrahimi VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3910*b7893ccfSSadaf Ebrahimi template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3911*b7893ccfSSadaf Ebrahimi
3912*b7893ccfSSadaf Ebrahimi T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3913*b7893ccfSSadaf Ebrahimi void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3914*b7893ccfSSadaf Ebrahimi
3915*b7893ccfSSadaf Ebrahimi template<typename U>
3916*b7893ccfSSadaf Ebrahimi bool operator==(const VmaStlAllocator<U>& rhs) const
3917*b7893ccfSSadaf Ebrahimi {
3918*b7893ccfSSadaf Ebrahimi return m_pCallbacks == rhs.m_pCallbacks;
3919*b7893ccfSSadaf Ebrahimi }
3920*b7893ccfSSadaf Ebrahimi template<typename U>
3921*b7893ccfSSadaf Ebrahimi bool operator!=(const VmaStlAllocator<U>& rhs) const
3922*b7893ccfSSadaf Ebrahimi {
3923*b7893ccfSSadaf Ebrahimi return m_pCallbacks != rhs.m_pCallbacks;
3924*b7893ccfSSadaf Ebrahimi }
3925*b7893ccfSSadaf Ebrahimi
3926*b7893ccfSSadaf Ebrahimi VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3927*b7893ccfSSadaf Ebrahimi };
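// Usage sketch (assumption): the allocator satisfies the minimal C++11 Allocator
// requirements, so it can back VmaVector as well as standard containers such as std::list.
/*
#include <list>

const VkAllocationCallbacks* allocs = VMA_NULL;
VmaStlAllocator<uint32_t> stlAlloc(allocs);

VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(stlAlloc);
vec.push_back(42u);

std::list< uint32_t, VmaStlAllocator<uint32_t> > lst(stlAlloc);
lst.push_back(7u);
*/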
3928*b7893ccfSSadaf Ebrahimi
3929*b7893ccfSSadaf Ebrahimi #if VMA_USE_STL_VECTOR
3930*b7893ccfSSadaf Ebrahimi
3931*b7893ccfSSadaf Ebrahimi #define VmaVector std::vector
3932*b7893ccfSSadaf Ebrahimi
3933*b7893ccfSSadaf Ebrahimi template<typename T, typename allocatorT>
3934*b7893ccfSSadaf Ebrahimi static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3935*b7893ccfSSadaf Ebrahimi {
3936*b7893ccfSSadaf Ebrahimi vec.insert(vec.begin() + index, item);
3937*b7893ccfSSadaf Ebrahimi }
3938*b7893ccfSSadaf Ebrahimi
3939*b7893ccfSSadaf Ebrahimi template<typename T, typename allocatorT>
3940*b7893ccfSSadaf Ebrahimi static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3941*b7893ccfSSadaf Ebrahimi {
3942*b7893ccfSSadaf Ebrahimi vec.erase(vec.begin() + index);
3943*b7893ccfSSadaf Ebrahimi }
3944*b7893ccfSSadaf Ebrahimi
3945*b7893ccfSSadaf Ebrahimi #else // #if VMA_USE_STL_VECTOR
3946*b7893ccfSSadaf Ebrahimi
3947*b7893ccfSSadaf Ebrahimi /* Class with an interface compatible with a subset of std::vector.
3948*b7893ccfSSadaf Ebrahimi T must be POD because constructors and destructors are not called, and memcpy is
3949*b7893ccfSSadaf Ebrahimi used to move these objects. */
3950*b7893ccfSSadaf Ebrahimi template<typename T, typename AllocatorT>
3951*b7893ccfSSadaf Ebrahimi class VmaVector
3952*b7893ccfSSadaf Ebrahimi {
3953*b7893ccfSSadaf Ebrahimi public:
3954*b7893ccfSSadaf Ebrahimi typedef T value_type;
3955*b7893ccfSSadaf Ebrahimi
3956*b7893ccfSSadaf Ebrahimi VmaVector(const AllocatorT& allocator) :
3957*b7893ccfSSadaf Ebrahimi m_Allocator(allocator),
3958*b7893ccfSSadaf Ebrahimi m_pArray(VMA_NULL),
3959*b7893ccfSSadaf Ebrahimi m_Count(0),
3960*b7893ccfSSadaf Ebrahimi m_Capacity(0)
3961*b7893ccfSSadaf Ebrahimi {
3962*b7893ccfSSadaf Ebrahimi }
3963*b7893ccfSSadaf Ebrahimi
3964*b7893ccfSSadaf Ebrahimi VmaVector(size_t count, const AllocatorT& allocator) :
3965*b7893ccfSSadaf Ebrahimi m_Allocator(allocator),
3966*b7893ccfSSadaf Ebrahimi m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3967*b7893ccfSSadaf Ebrahimi m_Count(count),
3968*b7893ccfSSadaf Ebrahimi m_Capacity(count)
3969*b7893ccfSSadaf Ebrahimi {
3970*b7893ccfSSadaf Ebrahimi }
3971*b7893ccfSSadaf Ebrahimi
3972*b7893ccfSSadaf Ebrahimi VmaVector(const VmaVector<T, AllocatorT>& src) :
3973*b7893ccfSSadaf Ebrahimi m_Allocator(src.m_Allocator),
3974*b7893ccfSSadaf Ebrahimi m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3975*b7893ccfSSadaf Ebrahimi m_Count(src.m_Count),
3976*b7893ccfSSadaf Ebrahimi m_Capacity(src.m_Count)
3977*b7893ccfSSadaf Ebrahimi {
3978*b7893ccfSSadaf Ebrahimi if(m_Count != 0)
3979*b7893ccfSSadaf Ebrahimi {
3980*b7893ccfSSadaf Ebrahimi memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3981*b7893ccfSSadaf Ebrahimi }
3982*b7893ccfSSadaf Ebrahimi }
3983*b7893ccfSSadaf Ebrahimi
3984*b7893ccfSSadaf Ebrahimi ~VmaVector()
3985*b7893ccfSSadaf Ebrahimi {
3986*b7893ccfSSadaf Ebrahimi VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3987*b7893ccfSSadaf Ebrahimi }
3988*b7893ccfSSadaf Ebrahimi
3989*b7893ccfSSadaf Ebrahimi VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3990*b7893ccfSSadaf Ebrahimi {
3991*b7893ccfSSadaf Ebrahimi if(&rhs != this)
3992*b7893ccfSSadaf Ebrahimi {
3993*b7893ccfSSadaf Ebrahimi resize(rhs.m_Count);
3994*b7893ccfSSadaf Ebrahimi if(m_Count != 0)
3995*b7893ccfSSadaf Ebrahimi {
3996*b7893ccfSSadaf Ebrahimi memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3997*b7893ccfSSadaf Ebrahimi }
3998*b7893ccfSSadaf Ebrahimi }
3999*b7893ccfSSadaf Ebrahimi return *this;
4000*b7893ccfSSadaf Ebrahimi }
4001*b7893ccfSSadaf Ebrahimi
4002*b7893ccfSSadaf Ebrahimi bool empty() const { return m_Count == 0; }
4003*b7893ccfSSadaf Ebrahimi size_t size() const { return m_Count; }
4004*b7893ccfSSadaf Ebrahimi T* data() { return m_pArray; }
4005*b7893ccfSSadaf Ebrahimi const T* data() const { return m_pArray; }
4006*b7893ccfSSadaf Ebrahimi
4007*b7893ccfSSadaf Ebrahimi T& operator[](size_t index)
4008*b7893ccfSSadaf Ebrahimi {
4009*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(index < m_Count);
4010*b7893ccfSSadaf Ebrahimi return m_pArray[index];
4011*b7893ccfSSadaf Ebrahimi }
4012*b7893ccfSSadaf Ebrahimi const T& operator[](size_t index) const
4013*b7893ccfSSadaf Ebrahimi {
4014*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(index < m_Count);
4015*b7893ccfSSadaf Ebrahimi return m_pArray[index];
4016*b7893ccfSSadaf Ebrahimi }
4017*b7893ccfSSadaf Ebrahimi
4018*b7893ccfSSadaf Ebrahimi T& front()
4019*b7893ccfSSadaf Ebrahimi {
4020*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4021*b7893ccfSSadaf Ebrahimi return m_pArray[0];
4022*b7893ccfSSadaf Ebrahimi }
4023*b7893ccfSSadaf Ebrahimi const T& front() const
4024*b7893ccfSSadaf Ebrahimi {
4025*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4026*b7893ccfSSadaf Ebrahimi return m_pArray[0];
4027*b7893ccfSSadaf Ebrahimi }
4028*b7893ccfSSadaf Ebrahimi T& back()
4029*b7893ccfSSadaf Ebrahimi {
4030*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4031*b7893ccfSSadaf Ebrahimi return m_pArray[m_Count - 1];
4032*b7893ccfSSadaf Ebrahimi }
4033*b7893ccfSSadaf Ebrahimi const T& back() const
4034*b7893ccfSSadaf Ebrahimi {
4035*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4036*b7893ccfSSadaf Ebrahimi return m_pArray[m_Count - 1];
4037*b7893ccfSSadaf Ebrahimi }
4038*b7893ccfSSadaf Ebrahimi
4039*b7893ccfSSadaf Ebrahimi void reserve(size_t newCapacity, bool freeMemory = false)
4040*b7893ccfSSadaf Ebrahimi {
4041*b7893ccfSSadaf Ebrahimi newCapacity = VMA_MAX(newCapacity, m_Count);
4042*b7893ccfSSadaf Ebrahimi
4043*b7893ccfSSadaf Ebrahimi if((newCapacity < m_Capacity) && !freeMemory)
4044*b7893ccfSSadaf Ebrahimi {
4045*b7893ccfSSadaf Ebrahimi newCapacity = m_Capacity;
4046*b7893ccfSSadaf Ebrahimi }
4047*b7893ccfSSadaf Ebrahimi
4048*b7893ccfSSadaf Ebrahimi if(newCapacity != m_Capacity)
4049*b7893ccfSSadaf Ebrahimi {
4050*b7893ccfSSadaf Ebrahimi T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4051*b7893ccfSSadaf Ebrahimi if(m_Count != 0)
4052*b7893ccfSSadaf Ebrahimi {
4053*b7893ccfSSadaf Ebrahimi memcpy(newArray, m_pArray, m_Count * sizeof(T));
4054*b7893ccfSSadaf Ebrahimi }
4055*b7893ccfSSadaf Ebrahimi VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4056*b7893ccfSSadaf Ebrahimi m_Capacity = newCapacity;
4057*b7893ccfSSadaf Ebrahimi m_pArray = newArray;
4058*b7893ccfSSadaf Ebrahimi }
4059*b7893ccfSSadaf Ebrahimi }
4060*b7893ccfSSadaf Ebrahimi
4061*b7893ccfSSadaf Ebrahimi void resize(size_t newCount, bool freeMemory = false)
4062*b7893ccfSSadaf Ebrahimi {
4063*b7893ccfSSadaf Ebrahimi size_t newCapacity = m_Capacity;
4064*b7893ccfSSadaf Ebrahimi if(newCount > m_Capacity)
4065*b7893ccfSSadaf Ebrahimi {
4066*b7893ccfSSadaf Ebrahimi newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4067*b7893ccfSSadaf Ebrahimi }
4068*b7893ccfSSadaf Ebrahimi else if(freeMemory)
4069*b7893ccfSSadaf Ebrahimi {
4070*b7893ccfSSadaf Ebrahimi newCapacity = newCount;
4071*b7893ccfSSadaf Ebrahimi }
4072*b7893ccfSSadaf Ebrahimi
4073*b7893ccfSSadaf Ebrahimi if(newCapacity != m_Capacity)
4074*b7893ccfSSadaf Ebrahimi {
4075*b7893ccfSSadaf Ebrahimi T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4076*b7893ccfSSadaf Ebrahimi const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4077*b7893ccfSSadaf Ebrahimi if(elementsToCopy != 0)
4078*b7893ccfSSadaf Ebrahimi {
4079*b7893ccfSSadaf Ebrahimi memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4080*b7893ccfSSadaf Ebrahimi }
4081*b7893ccfSSadaf Ebrahimi VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4082*b7893ccfSSadaf Ebrahimi m_Capacity = newCapacity;
4083*b7893ccfSSadaf Ebrahimi m_pArray = newArray;
4084*b7893ccfSSadaf Ebrahimi }
4085*b7893ccfSSadaf Ebrahimi
4086*b7893ccfSSadaf Ebrahimi m_Count = newCount;
4087*b7893ccfSSadaf Ebrahimi }
4088*b7893ccfSSadaf Ebrahimi
4089*b7893ccfSSadaf Ebrahimi void clear(bool freeMemory = false)
4090*b7893ccfSSadaf Ebrahimi {
4091*b7893ccfSSadaf Ebrahimi resize(0, freeMemory);
4092*b7893ccfSSadaf Ebrahimi }
4093*b7893ccfSSadaf Ebrahimi
4094*b7893ccfSSadaf Ebrahimi void insert(size_t index, const T& src)
4095*b7893ccfSSadaf Ebrahimi {
4096*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(index <= m_Count);
4097*b7893ccfSSadaf Ebrahimi const size_t oldCount = size();
4098*b7893ccfSSadaf Ebrahimi resize(oldCount + 1);
4099*b7893ccfSSadaf Ebrahimi if(index < oldCount)
4100*b7893ccfSSadaf Ebrahimi {
4101*b7893ccfSSadaf Ebrahimi memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4102*b7893ccfSSadaf Ebrahimi }
4103*b7893ccfSSadaf Ebrahimi m_pArray[index] = src;
4104*b7893ccfSSadaf Ebrahimi }
4105*b7893ccfSSadaf Ebrahimi
4106*b7893ccfSSadaf Ebrahimi void remove(size_t index)
4107*b7893ccfSSadaf Ebrahimi {
4108*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(index < m_Count);
4109*b7893ccfSSadaf Ebrahimi const size_t oldCount = size();
4110*b7893ccfSSadaf Ebrahimi if(index < oldCount - 1)
4111*b7893ccfSSadaf Ebrahimi {
4112*b7893ccfSSadaf Ebrahimi memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4113*b7893ccfSSadaf Ebrahimi }
4114*b7893ccfSSadaf Ebrahimi resize(oldCount - 1);
4115*b7893ccfSSadaf Ebrahimi }
4116*b7893ccfSSadaf Ebrahimi
4117*b7893ccfSSadaf Ebrahimi void push_back(const T& src)
4118*b7893ccfSSadaf Ebrahimi {
4119*b7893ccfSSadaf Ebrahimi const size_t newIndex = size();
4120*b7893ccfSSadaf Ebrahimi resize(newIndex + 1);
4121*b7893ccfSSadaf Ebrahimi m_pArray[newIndex] = src;
4122*b7893ccfSSadaf Ebrahimi }
4123*b7893ccfSSadaf Ebrahimi
4124*b7893ccfSSadaf Ebrahimi void pop_back()
4125*b7893ccfSSadaf Ebrahimi {
4126*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4127*b7893ccfSSadaf Ebrahimi resize(size() - 1);
4128*b7893ccfSSadaf Ebrahimi }
4129*b7893ccfSSadaf Ebrahimi
4130*b7893ccfSSadaf Ebrahimi void push_front(const T& src)
4131*b7893ccfSSadaf Ebrahimi {
4132*b7893ccfSSadaf Ebrahimi insert(0, src);
4133*b7893ccfSSadaf Ebrahimi }
4134*b7893ccfSSadaf Ebrahimi
4135*b7893ccfSSadaf Ebrahimi void pop_front()
4136*b7893ccfSSadaf Ebrahimi {
4137*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(m_Count > 0);
4138*b7893ccfSSadaf Ebrahimi remove(0);
4139*b7893ccfSSadaf Ebrahimi }
4140*b7893ccfSSadaf Ebrahimi
4141*b7893ccfSSadaf Ebrahimi typedef T* iterator;
4142*b7893ccfSSadaf Ebrahimi
4143*b7893ccfSSadaf Ebrahimi iterator begin() { return m_pArray; }
4144*b7893ccfSSadaf Ebrahimi iterator end() { return m_pArray + m_Count; }
4145*b7893ccfSSadaf Ebrahimi
4146*b7893ccfSSadaf Ebrahimi private:
4147*b7893ccfSSadaf Ebrahimi AllocatorT m_Allocator;
4148*b7893ccfSSadaf Ebrahimi T* m_pArray;
4149*b7893ccfSSadaf Ebrahimi size_t m_Count;
4150*b7893ccfSSadaf Ebrahimi size_t m_Capacity;
4151*b7893ccfSSadaf Ebrahimi };
4152*b7893ccfSSadaf Ebrahimi
4153*b7893ccfSSadaf Ebrahimi template<typename T, typename allocatorT>
4154*b7893ccfSSadaf Ebrahimi static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4155*b7893ccfSSadaf Ebrahimi {
4156*b7893ccfSSadaf Ebrahimi vec.insert(index, item);
4157*b7893ccfSSadaf Ebrahimi }
4158*b7893ccfSSadaf Ebrahimi
4159*b7893ccfSSadaf Ebrahimi template<typename T, typename allocatorT>
4160*b7893ccfSSadaf Ebrahimi static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4161*b7893ccfSSadaf Ebrahimi {
4162*b7893ccfSSadaf Ebrahimi vec.remove(index);
4163*b7893ccfSSadaf Ebrahimi }
4164*b7893ccfSSadaf Ebrahimi
4165*b7893ccfSSadaf Ebrahimi #endif // #if VMA_USE_STL_VECTOR
4166*b7893ccfSSadaf Ebrahimi
4167*b7893ccfSSadaf Ebrahimi template<typename CmpLess, typename VectorT>
4168*b7893ccfSSadaf Ebrahimi size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4169*b7893ccfSSadaf Ebrahimi {
4170*b7893ccfSSadaf Ebrahimi const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4171*b7893ccfSSadaf Ebrahimi vector.data(),
4172*b7893ccfSSadaf Ebrahimi vector.data() + vector.size(),
4173*b7893ccfSSadaf Ebrahimi value,
4174*b7893ccfSSadaf Ebrahimi CmpLess()) - vector.data();
4175*b7893ccfSSadaf Ebrahimi VmaVectorInsert(vector, indexToInsert, value);
4176*b7893ccfSSadaf Ebrahimi return indexToInsert;
4177*b7893ccfSSadaf Ebrahimi }
4178*b7893ccfSSadaf Ebrahimi
4179*b7893ccfSSadaf Ebrahimi template<typename CmpLess, typename VectorT>
4180*b7893ccfSSadaf Ebrahimi bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4181*b7893ccfSSadaf Ebrahimi {
4182*b7893ccfSSadaf Ebrahimi CmpLess comparator;
4183*b7893ccfSSadaf Ebrahimi typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4184*b7893ccfSSadaf Ebrahimi vector.begin(),
4185*b7893ccfSSadaf Ebrahimi vector.end(),
4186*b7893ccfSSadaf Ebrahimi value,
4187*b7893ccfSSadaf Ebrahimi comparator);
4188*b7893ccfSSadaf Ebrahimi if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4189*b7893ccfSSadaf Ebrahimi {
4190*b7893ccfSSadaf Ebrahimi size_t indexToRemove = it - vector.begin();
4191*b7893ccfSSadaf Ebrahimi VmaVectorRemove(vector, indexToRemove);
4192*b7893ccfSSadaf Ebrahimi return true;
4193*b7893ccfSSadaf Ebrahimi }
4194*b7893ccfSSadaf Ebrahimi return false;
4195*b7893ccfSSadaf Ebrahimi }
4196*b7893ccfSSadaf Ebrahimi
4197*b7893ccfSSadaf Ebrahimi template<typename CmpLess, typename IterT, typename KeyT>
4198*b7893ccfSSadaf Ebrahimi IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4199*b7893ccfSSadaf Ebrahimi {
4200*b7893ccfSSadaf Ebrahimi CmpLess comparator;
4201*b7893ccfSSadaf Ebrahimi IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4202*b7893ccfSSadaf Ebrahimi beg, end, value, comparator);
4203*b7893ccfSSadaf Ebrahimi if(it == end ||
4204*b7893ccfSSadaf Ebrahimi (!comparator(*it, value) && !comparator(value, *it)))
4205*b7893ccfSSadaf Ebrahimi {
4206*b7893ccfSSadaf Ebrahimi return it;
4207*b7893ccfSSadaf Ebrahimi }
4208*b7893ccfSSadaf Ebrahimi return end;
4209*b7893ccfSSadaf Ebrahimi }
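// Usage sketch (hypothetical comparator): keeping a VmaVector sorted with the helpers above.
/*
struct CmpLessU64 { bool operator()(uint64_t lhs, uint64_t rhs) const { return lhs < rhs; } };

VmaStlAllocator<uint64_t> stlAlloc(VMA_NULL);
VmaVector< uint64_t, VmaStlAllocator<uint64_t> > vec(stlAlloc);

VmaVectorInsertSorted<CmpLessU64>(vec, 30ull);
VmaVectorInsertSorted<CmpLessU64>(vec, 10ull);
VmaVectorInsertSorted<CmpLessU64>(vec, 20ull);                      // vec == { 10, 20, 30 }
const bool removed = VmaVectorRemoveSorted<CmpLessU64>(vec, 20ull); // true, vec == { 10, 30 }
*/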
4210*b7893ccfSSadaf Ebrahimi
4211*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
4212*b7893ccfSSadaf Ebrahimi // class VmaPoolAllocator
4213*b7893ccfSSadaf Ebrahimi
4214*b7893ccfSSadaf Ebrahimi /*
4215*b7893ccfSSadaf Ebrahimi Allocator for objects of type T, using a list of arrays (pools) to speed up
4216*b7893ccfSSadaf Ebrahimi allocation. The number of elements that can be allocated is not bounded because
4217*b7893ccfSSadaf Ebrahimi the allocator can create multiple blocks.
4218*b7893ccfSSadaf Ebrahimi */
4219*b7893ccfSSadaf Ebrahimi template<typename T>
4220*b7893ccfSSadaf Ebrahimi class VmaPoolAllocator
4221*b7893ccfSSadaf Ebrahimi {
4222*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaPoolAllocator)
4223*b7893ccfSSadaf Ebrahimi public:
4224*b7893ccfSSadaf Ebrahimi VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4225*b7893ccfSSadaf Ebrahimi ~VmaPoolAllocator();
4226*b7893ccfSSadaf Ebrahimi void Clear();
4227*b7893ccfSSadaf Ebrahimi T* Alloc();
4228*b7893ccfSSadaf Ebrahimi void Free(T* ptr);
4229*b7893ccfSSadaf Ebrahimi
4230*b7893ccfSSadaf Ebrahimi private:
4231*b7893ccfSSadaf Ebrahimi union Item
4232*b7893ccfSSadaf Ebrahimi {
4233*b7893ccfSSadaf Ebrahimi uint32_t NextFreeIndex;
4234*b7893ccfSSadaf Ebrahimi T Value;
4235*b7893ccfSSadaf Ebrahimi };
4236*b7893ccfSSadaf Ebrahimi
4237*b7893ccfSSadaf Ebrahimi struct ItemBlock
4238*b7893ccfSSadaf Ebrahimi {
4239*b7893ccfSSadaf Ebrahimi Item* pItems;
4240*b7893ccfSSadaf Ebrahimi uint32_t FirstFreeIndex;
4241*b7893ccfSSadaf Ebrahimi };
4242*b7893ccfSSadaf Ebrahimi
4243*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* m_pAllocationCallbacks;
4244*b7893ccfSSadaf Ebrahimi size_t m_ItemsPerBlock;
4245*b7893ccfSSadaf Ebrahimi VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4246*b7893ccfSSadaf Ebrahimi
4247*b7893ccfSSadaf Ebrahimi ItemBlock& CreateNewBlock();
4248*b7893ccfSSadaf Ebrahimi };
4249*b7893ccfSSadaf Ebrahimi
4250*b7893ccfSSadaf Ebrahimi template<typename T>
VmaPoolAllocator(const VkAllocationCallbacks * pAllocationCallbacks,size_t itemsPerBlock)4251*b7893ccfSSadaf Ebrahimi VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4252*b7893ccfSSadaf Ebrahimi m_pAllocationCallbacks(pAllocationCallbacks),
4253*b7893ccfSSadaf Ebrahimi m_ItemsPerBlock(itemsPerBlock),
4254*b7893ccfSSadaf Ebrahimi m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4255*b7893ccfSSadaf Ebrahimi {
4256*b7893ccfSSadaf Ebrahimi VMA_ASSERT(itemsPerBlock > 0);
4257*b7893ccfSSadaf Ebrahimi }
4258*b7893ccfSSadaf Ebrahimi
4259*b7893ccfSSadaf Ebrahimi template<typename T>
~VmaPoolAllocator()4260*b7893ccfSSadaf Ebrahimi VmaPoolAllocator<T>::~VmaPoolAllocator()
4261*b7893ccfSSadaf Ebrahimi {
4262*b7893ccfSSadaf Ebrahimi Clear();
4263*b7893ccfSSadaf Ebrahimi }
4264*b7893ccfSSadaf Ebrahimi
4265*b7893ccfSSadaf Ebrahimi template<typename T>
Clear()4266*b7893ccfSSadaf Ebrahimi void VmaPoolAllocator<T>::Clear()
4267*b7893ccfSSadaf Ebrahimi {
4268*b7893ccfSSadaf Ebrahimi for(size_t i = m_ItemBlocks.size(); i--; )
4269*b7893ccfSSadaf Ebrahimi vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4270*b7893ccfSSadaf Ebrahimi m_ItemBlocks.clear();
4271*b7893ccfSSadaf Ebrahimi }
4272*b7893ccfSSadaf Ebrahimi
4273*b7893ccfSSadaf Ebrahimi template<typename T>
Alloc()4274*b7893ccfSSadaf Ebrahimi T* VmaPoolAllocator<T>::Alloc()
4275*b7893ccfSSadaf Ebrahimi {
4276*b7893ccfSSadaf Ebrahimi for(size_t i = m_ItemBlocks.size(); i--; )
4277*b7893ccfSSadaf Ebrahimi {
4278*b7893ccfSSadaf Ebrahimi ItemBlock& block = m_ItemBlocks[i];
4279*b7893ccfSSadaf Ebrahimi // This block has some free items: Use first one.
4280*b7893ccfSSadaf Ebrahimi if(block.FirstFreeIndex != UINT32_MAX)
4281*b7893ccfSSadaf Ebrahimi {
4282*b7893ccfSSadaf Ebrahimi Item* const pItem = &block.pItems[block.FirstFreeIndex];
4283*b7893ccfSSadaf Ebrahimi block.FirstFreeIndex = pItem->NextFreeIndex;
4284*b7893ccfSSadaf Ebrahimi return &pItem->Value;
4285*b7893ccfSSadaf Ebrahimi }
4286*b7893ccfSSadaf Ebrahimi }
4287*b7893ccfSSadaf Ebrahimi
4288*b7893ccfSSadaf Ebrahimi // No block has free item: Create new one and use it.
4289*b7893ccfSSadaf Ebrahimi ItemBlock& newBlock = CreateNewBlock();
4290*b7893ccfSSadaf Ebrahimi Item* const pItem = &newBlock.pItems[0];
4291*b7893ccfSSadaf Ebrahimi newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4292*b7893ccfSSadaf Ebrahimi return &pItem->Value;
4293*b7893ccfSSadaf Ebrahimi }
4294*b7893ccfSSadaf Ebrahimi
4295*b7893ccfSSadaf Ebrahimi template<typename T>
Free(T * ptr)4296*b7893ccfSSadaf Ebrahimi void VmaPoolAllocator<T>::Free(T* ptr)
4297*b7893ccfSSadaf Ebrahimi {
4298*b7893ccfSSadaf Ebrahimi // Search all memory blocks to find ptr.
4299*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4300*b7893ccfSSadaf Ebrahimi {
4301*b7893ccfSSadaf Ebrahimi ItemBlock& block = m_ItemBlocks[i];
4302*b7893ccfSSadaf Ebrahimi
4303*b7893ccfSSadaf Ebrahimi // Casting to union.
4304*b7893ccfSSadaf Ebrahimi Item* pItemPtr;
4305*b7893ccfSSadaf Ebrahimi memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4306*b7893ccfSSadaf Ebrahimi
4307*b7893ccfSSadaf Ebrahimi // Check if pItemPtr is in address range of this block.
4308*b7893ccfSSadaf Ebrahimi if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4309*b7893ccfSSadaf Ebrahimi {
4310*b7893ccfSSadaf Ebrahimi const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4311*b7893ccfSSadaf Ebrahimi pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4312*b7893ccfSSadaf Ebrahimi block.FirstFreeIndex = index;
4313*b7893ccfSSadaf Ebrahimi return;
4314*b7893ccfSSadaf Ebrahimi }
4315*b7893ccfSSadaf Ebrahimi }
4316*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4317*b7893ccfSSadaf Ebrahimi }
4318*b7893ccfSSadaf Ebrahimi
4319*b7893ccfSSadaf Ebrahimi template<typename T>
CreateNewBlock()4320*b7893ccfSSadaf Ebrahimi typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4321*b7893ccfSSadaf Ebrahimi {
4322*b7893ccfSSadaf Ebrahimi ItemBlock newBlock = {
4323*b7893ccfSSadaf Ebrahimi vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4324*b7893ccfSSadaf Ebrahimi
4325*b7893ccfSSadaf Ebrahimi m_ItemBlocks.push_back(newBlock);
4326*b7893ccfSSadaf Ebrahimi
4327*b7893ccfSSadaf Ebrahimi // Setup singly-linked list of all free items in this block.
4328*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4329*b7893ccfSSadaf Ebrahimi newBlock.pItems[i].NextFreeIndex = i + 1;
4330*b7893ccfSSadaf Ebrahimi newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4331*b7893ccfSSadaf Ebrahimi return m_ItemBlocks.back();
4332*b7893ccfSSadaf Ebrahimi }
4333*b7893ccfSSadaf Ebrahimi
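// Illustrative use of VmaPoolAllocator (a minimal sketch, not part of the
// library; the item type and function name are example-only assumptions):
#if 0
static void ExamplePoolAllocatorUsage(const VkAllocationCallbacks* pCallbacks)
{
    struct Node { int payload; };
    VmaPoolAllocator<Node> pool(pCallbacks, 32); // 32 items per internal block.

    Node* const a = pool.Alloc(); // Reuses a free slot or creates a new block.
    a->payload = 1;
    pool.Free(a);                 // Returns the slot to its block's free list.
    pool.Clear();                 // Releases all blocks at once.
}
#endif
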
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would cause unnecessary
    // computations just to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

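// Illustrative use of VmaRawList (a minimal sketch, not part of the library;
// the function name is an example-only assumption):
#if 0
static void ExampleRawListUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaRawList<int> list(pCallbacks);
    VmaListItem<int>* const first = list.PushBack(10); // List: 10
    list.PushBack(30);                                  // List: 10, 30
    list.InsertAfter(first, 20);                        // List: 10, 20, 30
    list.Remove(first);                                 // List: 20, 30
    list.PopBack();                                     // List: 20
    VMA_ASSERT(list.GetCount() == 1);
}
#endif
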
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

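// Illustrative iteration over VmaList (a minimal sketch, not part of the
// library; the typedef and function name are example-only assumptions):
#if 0
static void ExampleListIteration(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > ExampleListType;
    ExampleListType list(VmaStlAllocator<uint32_t>(pCallbacks));
    list.push_back(1);
    list.push_back(2);
    for(ExampleListType::iterator it = list.begin(); it != list.end(); ++it)
    {
        const uint32_t value = *it; // Dereference yields the stored element.
        (void)value;
    }
}
#endif
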
#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };

struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
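    // Example (illustrative): with frameInUseCount = 2 and LastUseFrameIndex = 10,
    // MakeLost() can succeed only when currentFrameIndex is 13 or greater,
    // because the condition above requires 10 + 2 < currentFrameIndex.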

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
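    // Example (illustrative): an m_MapCount value of 0x82 means the allocation was
    // created persistently mapped (bit 0x80 set) and currently has 2 outstanding
    // vmaMapMemory() calls not yet paired with vmaUnmapMemory().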
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};

/*
Represents a region of VmaDeviceMemoryBlock that is either assigned to an
allocation and returned as an allocated memory block, or is free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};

// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
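// Example (illustrative): for suballocations a at offset 0 and b at offset 256,
// VmaSuballocationOffsetLess()(a, b) is true and VmaSuballocationOffsetGreater()(a, b)
// is false, so the two comparators yield ascending and descending orderings by offset.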

typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;

/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
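// Example (illustrative): a request that overlaps 4 MiB of allocations to make
// lost (sumItemSize = 4194304), 2 of which must actually become lost, has
// CalcCost() == 4194304 + 2 * VMA_LOST_ALLOCATION_COST == 6291456 bytes.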

/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only a single free suballocation.
5213*b7893ccfSSadaf Ebrahimi virtual bool IsEmpty() const = 0;
5214*b7893ccfSSadaf Ebrahimi
5215*b7893ccfSSadaf Ebrahimi virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5216*b7893ccfSSadaf Ebrahimi // Shouldn't modify blockCount.
5217*b7893ccfSSadaf Ebrahimi virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5218*b7893ccfSSadaf Ebrahimi
5219*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5220*b7893ccfSSadaf Ebrahimi virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5221*b7893ccfSSadaf Ebrahimi #endif
5222*b7893ccfSSadaf Ebrahimi
5223*b7893ccfSSadaf Ebrahimi // Tries to find a place for suballocation with given parameters inside this block.
5224*b7893ccfSSadaf Ebrahimi // If succeeded, fills pAllocationRequest and returns true.
5225*b7893ccfSSadaf Ebrahimi // If failed, returns false.
5226*b7893ccfSSadaf Ebrahimi virtual bool CreateAllocationRequest(
5227*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5228*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5229*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5230*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5231*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
5232*b7893ccfSSadaf Ebrahimi bool upperAddress,
5233*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
5234*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
5235*b7893ccfSSadaf Ebrahimi // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5236*b7893ccfSSadaf Ebrahimi uint32_t strategy,
5237*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest) = 0;
5238*b7893ccfSSadaf Ebrahimi
5239*b7893ccfSSadaf Ebrahimi virtual bool MakeRequestedAllocationsLost(
5240*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5241*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5242*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest) = 0;
5243*b7893ccfSSadaf Ebrahimi
5244*b7893ccfSSadaf Ebrahimi virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5245*b7893ccfSSadaf Ebrahimi
5246*b7893ccfSSadaf Ebrahimi virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5247*b7893ccfSSadaf Ebrahimi
5248*b7893ccfSSadaf Ebrahimi // Makes actual allocation based on request. Request must already be checked and valid.
5249*b7893ccfSSadaf Ebrahimi virtual void Alloc(
5250*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
5251*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
5252*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5253*b7893ccfSSadaf Ebrahimi bool upperAddress,
5254*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation) = 0;
5255*b7893ccfSSadaf Ebrahimi
5256*b7893ccfSSadaf Ebrahimi // Frees suballocation assigned to given memory region.
5257*b7893ccfSSadaf Ebrahimi virtual void Free(const VmaAllocation allocation) = 0;
5258*b7893ccfSSadaf Ebrahimi virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5259*b7893ccfSSadaf Ebrahimi
5260*b7893ccfSSadaf Ebrahimi // Tries to resize (grow or shrink) space for given allocation, in place.
ResizeAllocation(const VmaAllocation alloc,VkDeviceSize newSize)5261*b7893ccfSSadaf Ebrahimi virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5262*b7893ccfSSadaf Ebrahimi
5263*b7893ccfSSadaf Ebrahimi protected:
GetAllocationCallbacks()5264*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5265*b7893ccfSSadaf Ebrahimi
5266*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5267*b7893ccfSSadaf Ebrahimi void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5268*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedBytes,
5269*b7893ccfSSadaf Ebrahimi size_t allocationCount,
5270*b7893ccfSSadaf Ebrahimi size_t unusedRangeCount) const;
5271*b7893ccfSSadaf Ebrahimi void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5272*b7893ccfSSadaf Ebrahimi VkDeviceSize offset,
5273*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation) const;
5274*b7893ccfSSadaf Ebrahimi void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5275*b7893ccfSSadaf Ebrahimi VkDeviceSize offset,
5276*b7893ccfSSadaf Ebrahimi VkDeviceSize size) const;
5277*b7893ccfSSadaf Ebrahimi void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5278*b7893ccfSSadaf Ebrahimi #endif
5279*b7893ccfSSadaf Ebrahimi
5280*b7893ccfSSadaf Ebrahimi private:
5281*b7893ccfSSadaf Ebrahimi VkDeviceSize m_Size;
5282*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* m_pAllocationCallbacks;
5283*b7893ccfSSadaf Ebrahimi };
5284*b7893ccfSSadaf Ebrahimi
5285*b7893ccfSSadaf Ebrahimi #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5286*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Validation failed: " #cond); \
5287*b7893ccfSSadaf Ebrahimi return false; \
5288*b7893ccfSSadaf Ebrahimi } } while(false)
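// Usage sketch for VMA_VALIDATE (illustrative; the method body shown is hypothetical, not
// taken from the implementation): the macro is intended for the Validate() methods of the
// metadata classes below, where a failed condition asserts and aborts validation early:
//
//   bool SomeMetadata::Validate() const
//   {
//       VMA_VALIDATE(GetSize() > 0);              // asserts and returns false on failure
//       VMA_VALIDATE(m_SumFreeSize <= GetSize()); // further checks only run if earlier ones pass
//       return true;                              // all checks passed
//   }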
5289*b7893ccfSSadaf Ebrahimi
5290*b7893ccfSSadaf Ebrahimi class VmaBlockMetadata_Generic : public VmaBlockMetadata
5291*b7893ccfSSadaf Ebrahimi {
5292*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5293*b7893ccfSSadaf Ebrahimi public:
5294*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5295*b7893ccfSSadaf Ebrahimi virtual ~VmaBlockMetadata_Generic();
5296*b7893ccfSSadaf Ebrahimi virtual void Init(VkDeviceSize size);
5297*b7893ccfSSadaf Ebrahimi
5298*b7893ccfSSadaf Ebrahimi virtual bool Validate() const;
5299*b7893ccfSSadaf Ebrahimi virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5300*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5301*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5302*b7893ccfSSadaf Ebrahimi virtual bool IsEmpty() const;
5303*b7893ccfSSadaf Ebrahimi
5304*b7893ccfSSadaf Ebrahimi virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5305*b7893ccfSSadaf Ebrahimi virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5306*b7893ccfSSadaf Ebrahimi
5307*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5308*b7893ccfSSadaf Ebrahimi virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5309*b7893ccfSSadaf Ebrahimi #endif
5310*b7893ccfSSadaf Ebrahimi
5311*b7893ccfSSadaf Ebrahimi virtual bool CreateAllocationRequest(
5312*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5313*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5314*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5315*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5316*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
5317*b7893ccfSSadaf Ebrahimi bool upperAddress,
5318*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
5319*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
5320*b7893ccfSSadaf Ebrahimi uint32_t strategy,
5321*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5322*b7893ccfSSadaf Ebrahimi
5323*b7893ccfSSadaf Ebrahimi virtual bool MakeRequestedAllocationsLost(
5324*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5325*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5326*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5327*b7893ccfSSadaf Ebrahimi
5328*b7893ccfSSadaf Ebrahimi virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5329*b7893ccfSSadaf Ebrahimi
5330*b7893ccfSSadaf Ebrahimi virtual VkResult CheckCorruption(const void* pBlockData);
5331*b7893ccfSSadaf Ebrahimi
5332*b7893ccfSSadaf Ebrahimi virtual void Alloc(
5333*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
5334*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
5335*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5336*b7893ccfSSadaf Ebrahimi bool upperAddress,
5337*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation);
5338*b7893ccfSSadaf Ebrahimi
5339*b7893ccfSSadaf Ebrahimi virtual void Free(const VmaAllocation allocation);
5340*b7893ccfSSadaf Ebrahimi virtual void FreeAtOffset(VkDeviceSize offset);
5341*b7893ccfSSadaf Ebrahimi
5342*b7893ccfSSadaf Ebrahimi virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5343*b7893ccfSSadaf Ebrahimi
5344*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
5345*b7893ccfSSadaf Ebrahimi // For defragmentation
5346*b7893ccfSSadaf Ebrahimi
5347*b7893ccfSSadaf Ebrahimi bool IsBufferImageGranularityConflictPossible(
5348*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5349*b7893ccfSSadaf Ebrahimi VmaSuballocationType& inOutPrevSuballocType) const;
5350*b7893ccfSSadaf Ebrahimi
5351*b7893ccfSSadaf Ebrahimi private:
5352*b7893ccfSSadaf Ebrahimi friend class VmaDefragmentationAlgorithm_Generic;
5353*b7893ccfSSadaf Ebrahimi friend class VmaDefragmentationAlgorithm_Fast;
5354*b7893ccfSSadaf Ebrahimi
5355*b7893ccfSSadaf Ebrahimi uint32_t m_FreeCount;
5356*b7893ccfSSadaf Ebrahimi VkDeviceSize m_SumFreeSize;
5357*b7893ccfSSadaf Ebrahimi VmaSuballocationList m_Suballocations;
5358*b7893ccfSSadaf Ebrahimi // Suballocations that are free and have a size greater than a certain threshold.
5359*b7893ccfSSadaf Ebrahimi // Sorted by size, ascending.
5360*b7893ccfSSadaf Ebrahimi VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5361*b7893ccfSSadaf Ebrahimi
5362*b7893ccfSSadaf Ebrahimi bool ValidateFreeSuballocationList() const;
5363*b7893ccfSSadaf Ebrahimi
5364*b7893ccfSSadaf Ebrahimi // Checks whether a suballocation with the given parameters can be placed in the given suballocItem.
5365*b7893ccfSSadaf Ebrahimi // If yes, fills pOffset and returns true. If no, returns false.
5366*b7893ccfSSadaf Ebrahimi bool CheckAllocation(
5367*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5368*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5369*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5370*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5371*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
5372*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
5373*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator suballocItem,
5374*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
5375*b7893ccfSSadaf Ebrahimi VkDeviceSize* pOffset,
5376*b7893ccfSSadaf Ebrahimi size_t* itemsToMakeLostCount,
5377*b7893ccfSSadaf Ebrahimi VkDeviceSize* pSumFreeSize,
5378*b7893ccfSSadaf Ebrahimi VkDeviceSize* pSumItemSize) const;
5379*b7893ccfSSadaf Ebrahimi // Given a free suballocation, merges it with the following one, which must also be free.
5380*b7893ccfSSadaf Ebrahimi void MergeFreeWithNext(VmaSuballocationList::iterator item);
5381*b7893ccfSSadaf Ebrahimi // Releases given suballocation, making it free.
5382*b7893ccfSSadaf Ebrahimi // Merges it with adjacent free suballocations if applicable.
5383*b7893ccfSSadaf Ebrahimi // Returns iterator to new free suballocation at this place.
5384*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5385*b7893ccfSSadaf Ebrahimi // Given a free suballocation, inserts it into the sorted list
5386*b7893ccfSSadaf Ebrahimi // m_FreeSuballocationsBySize if it is suitable.
5387*b7893ccfSSadaf Ebrahimi void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5388*b7893ccfSSadaf Ebrahimi // Given a free suballocation, removes it from the sorted list
5389*b7893ccfSSadaf Ebrahimi // m_FreeSuballocationsBySize if it is suitable.
5390*b7893ccfSSadaf Ebrahimi void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5391*b7893ccfSSadaf Ebrahimi };
5392*b7893ccfSSadaf Ebrahimi
5393*b7893ccfSSadaf Ebrahimi /*
5394*b7893ccfSSadaf Ebrahimi Allocations and their references in internal data structure look like this:
5395*b7893ccfSSadaf Ebrahimi
5396*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5397*b7893ccfSSadaf Ebrahimi
5398*b7893ccfSSadaf Ebrahimi 0 +-------+
5399*b7893ccfSSadaf Ebrahimi | |
5400*b7893ccfSSadaf Ebrahimi | |
5401*b7893ccfSSadaf Ebrahimi | |
5402*b7893ccfSSadaf Ebrahimi +-------+
5403*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount]
5404*b7893ccfSSadaf Ebrahimi +-------+
5405*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5406*b7893ccfSSadaf Ebrahimi +-------+
5407*b7893ccfSSadaf Ebrahimi | ... |
5408*b7893ccfSSadaf Ebrahimi +-------+
5409*b7893ccfSSadaf Ebrahimi | Alloc | 1st[1st.size() - 1]
5410*b7893ccfSSadaf Ebrahimi +-------+
5411*b7893ccfSSadaf Ebrahimi | |
5412*b7893ccfSSadaf Ebrahimi | |
5413*b7893ccfSSadaf Ebrahimi | |
5414*b7893ccfSSadaf Ebrahimi GetSize() +-------+
5415*b7893ccfSSadaf Ebrahimi
5416*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5417*b7893ccfSSadaf Ebrahimi
5418*b7893ccfSSadaf Ebrahimi 0 +-------+
5419*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[0]
5420*b7893ccfSSadaf Ebrahimi +-------+
5421*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[1]
5422*b7893ccfSSadaf Ebrahimi +-------+
5423*b7893ccfSSadaf Ebrahimi | ... |
5424*b7893ccfSSadaf Ebrahimi +-------+
5425*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[2nd.size() - 1]
5426*b7893ccfSSadaf Ebrahimi +-------+
5427*b7893ccfSSadaf Ebrahimi | |
5428*b7893ccfSSadaf Ebrahimi | |
5429*b7893ccfSSadaf Ebrahimi | |
5430*b7893ccfSSadaf Ebrahimi +-------+
5431*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount]
5432*b7893ccfSSadaf Ebrahimi +-------+
5433*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5434*b7893ccfSSadaf Ebrahimi +-------+
5435*b7893ccfSSadaf Ebrahimi | ... |
5436*b7893ccfSSadaf Ebrahimi +-------+
5437*b7893ccfSSadaf Ebrahimi | Alloc | 1st[1st.size() - 1]
5438*b7893ccfSSadaf Ebrahimi +-------+
5439*b7893ccfSSadaf Ebrahimi | |
5440*b7893ccfSSadaf Ebrahimi GetSize() +-------+
5441*b7893ccfSSadaf Ebrahimi
5442*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5443*b7893ccfSSadaf Ebrahimi
5444*b7893ccfSSadaf Ebrahimi 0 +-------+
5445*b7893ccfSSadaf Ebrahimi | |
5446*b7893ccfSSadaf Ebrahimi | |
5447*b7893ccfSSadaf Ebrahimi | |
5448*b7893ccfSSadaf Ebrahimi +-------+
5449*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount]
5450*b7893ccfSSadaf Ebrahimi +-------+
5451*b7893ccfSSadaf Ebrahimi | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5452*b7893ccfSSadaf Ebrahimi +-------+
5453*b7893ccfSSadaf Ebrahimi | ... |
5454*b7893ccfSSadaf Ebrahimi +-------+
5455*b7893ccfSSadaf Ebrahimi | Alloc | 1st[1st.size() - 1]
5456*b7893ccfSSadaf Ebrahimi +-------+
5457*b7893ccfSSadaf Ebrahimi | |
5458*b7893ccfSSadaf Ebrahimi | |
5459*b7893ccfSSadaf Ebrahimi | |
5460*b7893ccfSSadaf Ebrahimi +-------+
5461*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[2nd.size() - 1]
5462*b7893ccfSSadaf Ebrahimi +-------+
5463*b7893ccfSSadaf Ebrahimi | ... |
5464*b7893ccfSSadaf Ebrahimi +-------+
5465*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[1]
5466*b7893ccfSSadaf Ebrahimi +-------+
5467*b7893ccfSSadaf Ebrahimi | Alloc | 2nd[0]
5468*b7893ccfSSadaf Ebrahimi GetSize() +-------+
5469*b7893ccfSSadaf Ebrahimi
5470*b7893ccfSSadaf Ebrahimi */
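/*
Usage sketch (illustrative; assumes the public API of this library version, namely
vmaCreatePool and VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, and that `allocator` and
`memoryTypeIndex` were obtained elsewhere, e.g. via vmaFindMemoryTypeIndex). This metadata
class backs custom pools created with the linear algorithm flag, which then follow one of
the three layouts pictured above:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // selects VmaBlockMetadata_Linear
    poolCreateInfo.blockSize = 16ull * 1024 * 1024;              // one 16 MiB block
    poolCreateInfo.maxBlockCount = 1;                            // linear pools use a single block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

Whether the pool ends up in ring-buffer or double-stack mode depends on usage: freeing in
allocation order keeps it a ring buffer, while allocating with
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT places allocations in the 2nd vector (upper side).
*/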
5471*b7893ccfSSadaf Ebrahimi class VmaBlockMetadata_Linear : public VmaBlockMetadata
5472*b7893ccfSSadaf Ebrahimi {
5473*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5474*b7893ccfSSadaf Ebrahimi public:
5475*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5476*b7893ccfSSadaf Ebrahimi virtual ~VmaBlockMetadata_Linear();
5477*b7893ccfSSadaf Ebrahimi virtual void Init(VkDeviceSize size);
5478*b7893ccfSSadaf Ebrahimi
5479*b7893ccfSSadaf Ebrahimi virtual bool Validate() const;
5480*b7893ccfSSadaf Ebrahimi virtual size_t GetAllocationCount() const;
5481*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5482*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5483*b7893ccfSSadaf Ebrahimi virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5484*b7893ccfSSadaf Ebrahimi
5485*b7893ccfSSadaf Ebrahimi virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5486*b7893ccfSSadaf Ebrahimi virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5487*b7893ccfSSadaf Ebrahimi
5488*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5489*b7893ccfSSadaf Ebrahimi virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5490*b7893ccfSSadaf Ebrahimi #endif
5491*b7893ccfSSadaf Ebrahimi
5492*b7893ccfSSadaf Ebrahimi virtual bool CreateAllocationRequest(
5493*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5494*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5495*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5496*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5497*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
5498*b7893ccfSSadaf Ebrahimi bool upperAddress,
5499*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
5500*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
5501*b7893ccfSSadaf Ebrahimi uint32_t strategy,
5502*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5503*b7893ccfSSadaf Ebrahimi
5504*b7893ccfSSadaf Ebrahimi virtual bool MakeRequestedAllocationsLost(
5505*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5506*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5507*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5508*b7893ccfSSadaf Ebrahimi
5509*b7893ccfSSadaf Ebrahimi virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5510*b7893ccfSSadaf Ebrahimi
5511*b7893ccfSSadaf Ebrahimi virtual VkResult CheckCorruption(const void* pBlockData);
5512*b7893ccfSSadaf Ebrahimi
5513*b7893ccfSSadaf Ebrahimi virtual void Alloc(
5514*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
5515*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
5516*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5517*b7893ccfSSadaf Ebrahimi bool upperAddress,
5518*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation);
5519*b7893ccfSSadaf Ebrahimi
5520*b7893ccfSSadaf Ebrahimi virtual void Free(const VmaAllocation allocation);
5521*b7893ccfSSadaf Ebrahimi virtual void FreeAtOffset(VkDeviceSize offset);
5522*b7893ccfSSadaf Ebrahimi
5523*b7893ccfSSadaf Ebrahimi private:
5524*b7893ccfSSadaf Ebrahimi /*
5525*b7893ccfSSadaf Ebrahimi There are two suballocation vectors, used in ping-pong way.
5526*b7893ccfSSadaf Ebrahimi The one with index m_1stVectorIndex is called 1st.
5527*b7893ccfSSadaf Ebrahimi The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5528*b7893ccfSSadaf Ebrahimi 2nd can be non-empty only when 1st is not empty.
5529*b7893ccfSSadaf Ebrahimi When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5530*b7893ccfSSadaf Ebrahimi */
5531*b7893ccfSSadaf Ebrahimi typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5532*b7893ccfSSadaf Ebrahimi
5533*b7893ccfSSadaf Ebrahimi enum SECOND_VECTOR_MODE
5534*b7893ccfSSadaf Ebrahimi {
5535*b7893ccfSSadaf Ebrahimi SECOND_VECTOR_EMPTY,
5536*b7893ccfSSadaf Ebrahimi /*
5537*b7893ccfSSadaf Ebrahimi Suballocations in 2nd vector are created later than the ones in 1st, but they
5538*b7893ccfSSadaf Ebrahimi all have smaller offsets.
5539*b7893ccfSSadaf Ebrahimi */
5540*b7893ccfSSadaf Ebrahimi SECOND_VECTOR_RING_BUFFER,
5541*b7893ccfSSadaf Ebrahimi /*
5542*b7893ccfSSadaf Ebrahimi Suballocations in 2nd vector are upper side of double stack.
5543*b7893ccfSSadaf Ebrahimi They all have offsets higher than those in 1st vector.
5544*b7893ccfSSadaf Ebrahimi Top of this stack means smaller offsets, but higher indices in this vector.
5545*b7893ccfSSadaf Ebrahimi */
5546*b7893ccfSSadaf Ebrahimi SECOND_VECTOR_DOUBLE_STACK,
5547*b7893ccfSSadaf Ebrahimi };
5548*b7893ccfSSadaf Ebrahimi
5549*b7893ccfSSadaf Ebrahimi VkDeviceSize m_SumFreeSize;
5550*b7893ccfSSadaf Ebrahimi SuballocationVectorType m_Suballocations0, m_Suballocations1;
5551*b7893ccfSSadaf Ebrahimi uint32_t m_1stVectorIndex;
5552*b7893ccfSSadaf Ebrahimi SECOND_VECTOR_MODE m_2ndVectorMode;
5553*b7893ccfSSadaf Ebrahimi
5554*b7893ccfSSadaf Ebrahimi SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5555*b7893ccfSSadaf Ebrahimi SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5556*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5557*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5558*b7893ccfSSadaf Ebrahimi
5559*b7893ccfSSadaf Ebrahimi // Number of items in 1st vector with hAllocation = null at the beginning.
5560*b7893ccfSSadaf Ebrahimi size_t m_1stNullItemsBeginCount;
5561*b7893ccfSSadaf Ebrahimi // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5562*b7893ccfSSadaf Ebrahimi size_t m_1stNullItemsMiddleCount;
5563*b7893ccfSSadaf Ebrahimi // Number of items in 2nd vector with hAllocation = null.
5564*b7893ccfSSadaf Ebrahimi size_t m_2ndNullItemsCount;
5565*b7893ccfSSadaf Ebrahimi
5566*b7893ccfSSadaf Ebrahimi bool ShouldCompact1st() const;
5567*b7893ccfSSadaf Ebrahimi void CleanupAfterFree();
5568*b7893ccfSSadaf Ebrahimi };
5569*b7893ccfSSadaf Ebrahimi
5570*b7893ccfSSadaf Ebrahimi /*
5571*b7893ccfSSadaf Ebrahimi - GetSize() is the original size of allocated memory block.
5572*b7893ccfSSadaf Ebrahimi - m_UsableSize is this size aligned down to a power of two.
5573*b7893ccfSSadaf Ebrahimi All allocations and calculations happen relative to m_UsableSize.
5574*b7893ccfSSadaf Ebrahimi - GetUnusableSize() is the difference between them.
5575*b7893ccfSSadaf Ebrahimi It is reported as a separate, unused range, not available for allocations.
5576*b7893ccfSSadaf Ebrahimi
5577*b7893ccfSSadaf Ebrahimi Node at level 0 has size = m_UsableSize.
5578*b7893ccfSSadaf Ebrahimi Each subsequent level contains nodes with half the size of the previous level.
5579*b7893ccfSSadaf Ebrahimi m_LevelCount is the maximum number of levels to use in the current object.
5580*b7893ccfSSadaf Ebrahimi */
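/*
Worked example (illustrative numbers, not taken from the implementation): if GetSize() == 1000,
m_UsableSize is 1000 aligned down to a power of two, i.e. 512, so GetUnusableSize() == 488.
Node sizes per level then follow LevelToNodeSize(level) = m_UsableSize >> level:

    level 0: 512
    level 1: 256
    level 2: 128
    level 3:  64
    level 4:  32   <- MIN_NODE_SIZE, so presumably no deeper levels are used in this case

AllocSizeToLevel() maps a requested size to the deepest level whose node size still fits it;
a 100-byte request would land at level 2 (node size 128) in this example.
*/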
5581*b7893ccfSSadaf Ebrahimi class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5582*b7893ccfSSadaf Ebrahimi {
5583*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5584*b7893ccfSSadaf Ebrahimi public:
5585*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5586*b7893ccfSSadaf Ebrahimi virtual ~VmaBlockMetadata_Buddy();
5587*b7893ccfSSadaf Ebrahimi virtual void Init(VkDeviceSize size);
5588*b7893ccfSSadaf Ebrahimi
5589*b7893ccfSSadaf Ebrahimi virtual bool Validate() const;
5590*b7893ccfSSadaf Ebrahimi virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5591*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5592*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5593*b7893ccfSSadaf Ebrahimi virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5594*b7893ccfSSadaf Ebrahimi
5595*b7893ccfSSadaf Ebrahimi virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5596*b7893ccfSSadaf Ebrahimi virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5597*b7893ccfSSadaf Ebrahimi
5598*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5599*b7893ccfSSadaf Ebrahimi virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5600*b7893ccfSSadaf Ebrahimi #endif
5601*b7893ccfSSadaf Ebrahimi
5602*b7893ccfSSadaf Ebrahimi virtual bool CreateAllocationRequest(
5603*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5604*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5605*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5606*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5607*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
5608*b7893ccfSSadaf Ebrahimi bool upperAddress,
5609*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
5610*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
5611*b7893ccfSSadaf Ebrahimi uint32_t strategy,
5612*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5613*b7893ccfSSadaf Ebrahimi
5614*b7893ccfSSadaf Ebrahimi virtual bool MakeRequestedAllocationsLost(
5615*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5616*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5617*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest);
5618*b7893ccfSSadaf Ebrahimi
5619*b7893ccfSSadaf Ebrahimi virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5620*b7893ccfSSadaf Ebrahimi
5621*b7893ccfSSadaf Ebrahimi virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5622*b7893ccfSSadaf Ebrahimi
5623*b7893ccfSSadaf Ebrahimi virtual void Alloc(
5624*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
5625*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
5626*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
5627*b7893ccfSSadaf Ebrahimi bool upperAddress,
5628*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation);
5629*b7893ccfSSadaf Ebrahimi
5630*b7893ccfSSadaf Ebrahimi virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5631*b7893ccfSSadaf Ebrahimi virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5632*b7893ccfSSadaf Ebrahimi
5633*b7893ccfSSadaf Ebrahimi private:
5634*b7893ccfSSadaf Ebrahimi static const VkDeviceSize MIN_NODE_SIZE = 32;
5635*b7893ccfSSadaf Ebrahimi static const size_t MAX_LEVELS = 30;
5636*b7893ccfSSadaf Ebrahimi
5637*b7893ccfSSadaf Ebrahimi struct ValidationContext
5638*b7893ccfSSadaf Ebrahimi {
5639*b7893ccfSSadaf Ebrahimi size_t calculatedAllocationCount;
5640*b7893ccfSSadaf Ebrahimi size_t calculatedFreeCount;
5641*b7893ccfSSadaf Ebrahimi VkDeviceSize calculatedSumFreeSize;
5642*b7893ccfSSadaf Ebrahimi
5643*b7893ccfSSadaf Ebrahimi ValidationContext() :
5644*b7893ccfSSadaf Ebrahimi calculatedAllocationCount(0),
5645*b7893ccfSSadaf Ebrahimi calculatedFreeCount(0),
5646*b7893ccfSSadaf Ebrahimi calculatedSumFreeSize(0) { }
5647*b7893ccfSSadaf Ebrahimi };
5648*b7893ccfSSadaf Ebrahimi
5649*b7893ccfSSadaf Ebrahimi struct Node
5650*b7893ccfSSadaf Ebrahimi {
5651*b7893ccfSSadaf Ebrahimi VkDeviceSize offset;
5652*b7893ccfSSadaf Ebrahimi enum TYPE
5653*b7893ccfSSadaf Ebrahimi {
5654*b7893ccfSSadaf Ebrahimi TYPE_FREE,
5655*b7893ccfSSadaf Ebrahimi TYPE_ALLOCATION,
5656*b7893ccfSSadaf Ebrahimi TYPE_SPLIT,
5657*b7893ccfSSadaf Ebrahimi TYPE_COUNT
5658*b7893ccfSSadaf Ebrahimi } type;
5659*b7893ccfSSadaf Ebrahimi Node* parent;
5660*b7893ccfSSadaf Ebrahimi Node* buddy;
5661*b7893ccfSSadaf Ebrahimi
5662*b7893ccfSSadaf Ebrahimi union
5663*b7893ccfSSadaf Ebrahimi {
5664*b7893ccfSSadaf Ebrahimi struct
5665*b7893ccfSSadaf Ebrahimi {
5666*b7893ccfSSadaf Ebrahimi Node* prev;
5667*b7893ccfSSadaf Ebrahimi Node* next;
5668*b7893ccfSSadaf Ebrahimi } free;
5669*b7893ccfSSadaf Ebrahimi struct
5670*b7893ccfSSadaf Ebrahimi {
5671*b7893ccfSSadaf Ebrahimi VmaAllocation alloc;
5672*b7893ccfSSadaf Ebrahimi } allocation;
5673*b7893ccfSSadaf Ebrahimi struct
5674*b7893ccfSSadaf Ebrahimi {
5675*b7893ccfSSadaf Ebrahimi Node* leftChild;
5676*b7893ccfSSadaf Ebrahimi } split;
5677*b7893ccfSSadaf Ebrahimi };
5678*b7893ccfSSadaf Ebrahimi };
5679*b7893ccfSSadaf Ebrahimi
5680*b7893ccfSSadaf Ebrahimi // Size of the memory block aligned down to a power of two.
5681*b7893ccfSSadaf Ebrahimi VkDeviceSize m_UsableSize;
5682*b7893ccfSSadaf Ebrahimi uint32_t m_LevelCount;
5683*b7893ccfSSadaf Ebrahimi
5684*b7893ccfSSadaf Ebrahimi Node* m_Root;
5685*b7893ccfSSadaf Ebrahimi struct {
5686*b7893ccfSSadaf Ebrahimi Node* front;
5687*b7893ccfSSadaf Ebrahimi Node* back;
5688*b7893ccfSSadaf Ebrahimi } m_FreeList[MAX_LEVELS];
5689*b7893ccfSSadaf Ebrahimi // Number of nodes in the tree with type == TYPE_ALLOCATION.
5690*b7893ccfSSadaf Ebrahimi size_t m_AllocationCount;
5691*b7893ccfSSadaf Ebrahimi // Number of nodes in the tree with type == TYPE_FREE.
5692*b7893ccfSSadaf Ebrahimi size_t m_FreeCount;
5693*b7893ccfSSadaf Ebrahimi // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5694*b7893ccfSSadaf Ebrahimi VkDeviceSize m_SumFreeSize;
5695*b7893ccfSSadaf Ebrahimi
5696*b7893ccfSSadaf Ebrahimi VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5697*b7893ccfSSadaf Ebrahimi void DeleteNode(Node* node);
5698*b7893ccfSSadaf Ebrahimi bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5699*b7893ccfSSadaf Ebrahimi uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5700*b7893ccfSSadaf Ebrahimi inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5701*b7893ccfSSadaf Ebrahimi // Alloc passed just for validation. Can be null.
5702*b7893ccfSSadaf Ebrahimi void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5703*b7893ccfSSadaf Ebrahimi void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5704*b7893ccfSSadaf Ebrahimi // Adds node to the front of FreeList at given level.
5705*b7893ccfSSadaf Ebrahimi // node->type must be FREE.
5706*b7893ccfSSadaf Ebrahimi // node->free.prev, next can be undefined.
5707*b7893ccfSSadaf Ebrahimi void AddToFreeListFront(uint32_t level, Node* node);
5708*b7893ccfSSadaf Ebrahimi // Removes node from FreeList at given level.
5709*b7893ccfSSadaf Ebrahimi // node->type must be FREE.
5710*b7893ccfSSadaf Ebrahimi // node->free.prev, next stay untouched.
5711*b7893ccfSSadaf Ebrahimi void RemoveFromFreeList(uint32_t level, Node* node);
5712*b7893ccfSSadaf Ebrahimi
5713*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5714*b7893ccfSSadaf Ebrahimi void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5715*b7893ccfSSadaf Ebrahimi #endif
5716*b7893ccfSSadaf Ebrahimi };
5717*b7893ccfSSadaf Ebrahimi
5718*b7893ccfSSadaf Ebrahimi /*
5719*b7893ccfSSadaf Ebrahimi Represents a single block of device memory (`VkDeviceMemory`) with all the
5720*b7893ccfSSadaf Ebrahimi data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5721*b7893ccfSSadaf Ebrahimi
5722*b7893ccfSSadaf Ebrahimi Thread-safety: This class must be externally synchronized.
5723*b7893ccfSSadaf Ebrahimi */
5724*b7893ccfSSadaf Ebrahimi class VmaDeviceMemoryBlock
5725*b7893ccfSSadaf Ebrahimi {
5726*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5727*b7893ccfSSadaf Ebrahimi public:
5728*b7893ccfSSadaf Ebrahimi VmaBlockMetadata* m_pMetadata;
5729*b7893ccfSSadaf Ebrahimi
5730*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5731*b7893ccfSSadaf Ebrahimi
5732*b7893ccfSSadaf Ebrahimi ~VmaDeviceMemoryBlock()
5733*b7893ccfSSadaf Ebrahimi {
5734*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5735*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5736*b7893ccfSSadaf Ebrahimi }
5737*b7893ccfSSadaf Ebrahimi
5738*b7893ccfSSadaf Ebrahimi // Always call after construction.
5739*b7893ccfSSadaf Ebrahimi void Init(
5740*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
5741*b7893ccfSSadaf Ebrahimi uint32_t newMemoryTypeIndex,
5742*b7893ccfSSadaf Ebrahimi VkDeviceMemory newMemory,
5743*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize,
5744*b7893ccfSSadaf Ebrahimi uint32_t id,
5745*b7893ccfSSadaf Ebrahimi uint32_t algorithm);
5746*b7893ccfSSadaf Ebrahimi // Always call before destruction.
5747*b7893ccfSSadaf Ebrahimi void Destroy(VmaAllocator allocator);
5748*b7893ccfSSadaf Ebrahimi
5749*b7893ccfSSadaf Ebrahimi VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5750*b7893ccfSSadaf Ebrahimi uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5751*b7893ccfSSadaf Ebrahimi uint32_t GetId() const { return m_Id; }
5752*b7893ccfSSadaf Ebrahimi void* GetMappedData() const { return m_pMappedData; }
5753*b7893ccfSSadaf Ebrahimi
5754*b7893ccfSSadaf Ebrahimi // Validates all data structures inside this object. If not valid, returns false.
5755*b7893ccfSSadaf Ebrahimi bool Validate() const;
5756*b7893ccfSSadaf Ebrahimi
5757*b7893ccfSSadaf Ebrahimi VkResult CheckCorruption(VmaAllocator hAllocator);
5758*b7893ccfSSadaf Ebrahimi
5759*b7893ccfSSadaf Ebrahimi // ppData can be null.
5760*b7893ccfSSadaf Ebrahimi VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5761*b7893ccfSSadaf Ebrahimi void Unmap(VmaAllocator hAllocator, uint32_t count);
5762*b7893ccfSSadaf Ebrahimi
5763*b7893ccfSSadaf Ebrahimi VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5764*b7893ccfSSadaf Ebrahimi VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5765*b7893ccfSSadaf Ebrahimi
5766*b7893ccfSSadaf Ebrahimi VkResult BindBufferMemory(
5767*b7893ccfSSadaf Ebrahimi const VmaAllocator hAllocator,
5768*b7893ccfSSadaf Ebrahimi const VmaAllocation hAllocation,
5769*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer);
5770*b7893ccfSSadaf Ebrahimi VkResult BindImageMemory(
5771*b7893ccfSSadaf Ebrahimi const VmaAllocator hAllocator,
5772*b7893ccfSSadaf Ebrahimi const VmaAllocation hAllocation,
5773*b7893ccfSSadaf Ebrahimi VkImage hImage);
5774*b7893ccfSSadaf Ebrahimi
5775*b7893ccfSSadaf Ebrahimi private:
5776*b7893ccfSSadaf Ebrahimi uint32_t m_MemoryTypeIndex;
5777*b7893ccfSSadaf Ebrahimi uint32_t m_Id;
5778*b7893ccfSSadaf Ebrahimi VkDeviceMemory m_hMemory;
5779*b7893ccfSSadaf Ebrahimi
5780*b7893ccfSSadaf Ebrahimi /*
5781*b7893ccfSSadaf Ebrahimi Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5782*b7893ccfSSadaf Ebrahimi Also protects m_MapCount, m_pMappedData.
5783*b7893ccfSSadaf Ebrahimi Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5784*b7893ccfSSadaf Ebrahimi */
5785*b7893ccfSSadaf Ebrahimi VMA_MUTEX m_Mutex;
5786*b7893ccfSSadaf Ebrahimi uint32_t m_MapCount;
5787*b7893ccfSSadaf Ebrahimi void* m_pMappedData;
5788*b7893ccfSSadaf Ebrahimi };
5789*b7893ccfSSadaf Ebrahimi
5790*b7893ccfSSadaf Ebrahimi struct VmaPointerLess
5791*b7893ccfSSadaf Ebrahimi {
5792*b7893ccfSSadaf Ebrahimi bool operator()(const void* lhs, const void* rhs) const
5793*b7893ccfSSadaf Ebrahimi {
5794*b7893ccfSSadaf Ebrahimi return lhs < rhs;
5795*b7893ccfSSadaf Ebrahimi }
5796*b7893ccfSSadaf Ebrahimi };
5797*b7893ccfSSadaf Ebrahimi
5798*b7893ccfSSadaf Ebrahimi struct VmaDefragmentationMove
5799*b7893ccfSSadaf Ebrahimi {
5800*b7893ccfSSadaf Ebrahimi size_t srcBlockIndex;
5801*b7893ccfSSadaf Ebrahimi size_t dstBlockIndex;
5802*b7893ccfSSadaf Ebrahimi VkDeviceSize srcOffset;
5803*b7893ccfSSadaf Ebrahimi VkDeviceSize dstOffset;
5804*b7893ccfSSadaf Ebrahimi VkDeviceSize size;
5805*b7893ccfSSadaf Ebrahimi };
5806*b7893ccfSSadaf Ebrahimi
5807*b7893ccfSSadaf Ebrahimi class VmaDefragmentationAlgorithm;
5808*b7893ccfSSadaf Ebrahimi
5809*b7893ccfSSadaf Ebrahimi /*
5810*b7893ccfSSadaf Ebrahimi Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5811*b7893ccfSSadaf Ebrahimi Vulkan memory type.
5812*b7893ccfSSadaf Ebrahimi
5813*b7893ccfSSadaf Ebrahimi Synchronized internally with a mutex.
5814*b7893ccfSSadaf Ebrahimi */
5815*b7893ccfSSadaf Ebrahimi struct VmaBlockVector
5816*b7893ccfSSadaf Ebrahimi {
5817*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaBlockVector)
5818*b7893ccfSSadaf Ebrahimi public:
5819*b7893ccfSSadaf Ebrahimi VmaBlockVector(
5820*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
5821*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeIndex,
5822*b7893ccfSSadaf Ebrahimi VkDeviceSize preferredBlockSize,
5823*b7893ccfSSadaf Ebrahimi size_t minBlockCount,
5824*b7893ccfSSadaf Ebrahimi size_t maxBlockCount,
5825*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
5826*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
5827*b7893ccfSSadaf Ebrahimi bool isCustomPool,
5828*b7893ccfSSadaf Ebrahimi bool explicitBlockSize,
5829*b7893ccfSSadaf Ebrahimi uint32_t algorithm);
5830*b7893ccfSSadaf Ebrahimi ~VmaBlockVector();
5831*b7893ccfSSadaf Ebrahimi
5832*b7893ccfSSadaf Ebrahimi VkResult CreateMinBlocks();
5833*b7893ccfSSadaf Ebrahimi
5834*b7893ccfSSadaf Ebrahimi uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5835*b7893ccfSSadaf Ebrahimi VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5836*b7893ccfSSadaf Ebrahimi VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5837*b7893ccfSSadaf Ebrahimi uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5838*b7893ccfSSadaf Ebrahimi uint32_t GetAlgorithm() const { return m_Algorithm; }
5839*b7893ccfSSadaf Ebrahimi
5840*b7893ccfSSadaf Ebrahimi void GetPoolStats(VmaPoolStats* pStats);
5841*b7893ccfSSadaf Ebrahimi
5842*b7893ccfSSadaf Ebrahimi bool IsEmpty() const { return m_Blocks.empty(); }
5843*b7893ccfSSadaf Ebrahimi bool IsCorruptionDetectionEnabled() const;
5844*b7893ccfSSadaf Ebrahimi
5845*b7893ccfSSadaf Ebrahimi VkResult Allocate(
5846*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
5847*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5848*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
5849*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
5850*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
5851*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
5852*b7893ccfSSadaf Ebrahimi size_t allocationCount,
5853*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations);
5854*b7893ccfSSadaf Ebrahimi
5855*b7893ccfSSadaf Ebrahimi void Free(
5856*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation);
5857*b7893ccfSSadaf Ebrahimi
5858*b7893ccfSSadaf Ebrahimi // Adds statistics of this BlockVector to pStats.
5859*b7893ccfSSadaf Ebrahimi void AddStats(VmaStats* pStats);
5860*b7893ccfSSadaf Ebrahimi
5861*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5862*b7893ccfSSadaf Ebrahimi void PrintDetailedMap(class VmaJsonWriter& json);
5863*b7893ccfSSadaf Ebrahimi #endif
5864*b7893ccfSSadaf Ebrahimi
5865*b7893ccfSSadaf Ebrahimi void MakePoolAllocationsLost(
5866*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5867*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount);
5868*b7893ccfSSadaf Ebrahimi VkResult CheckCorruption();
5869*b7893ccfSSadaf Ebrahimi
5870*b7893ccfSSadaf Ebrahimi // Saves results in pCtx->res.
5871*b7893ccfSSadaf Ebrahimi void Defragment(
5872*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pCtx,
5873*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
5874*b7893ccfSSadaf Ebrahimi VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5875*b7893ccfSSadaf Ebrahimi VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5876*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer);
5877*b7893ccfSSadaf Ebrahimi void DefragmentationEnd(
5878*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pCtx,
5879*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats);
5880*b7893ccfSSadaf Ebrahimi
5881*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
5882*b7893ccfSSadaf Ebrahimi // To be used only while the m_Mutex is locked. Used during defragmentation.
5883*b7893ccfSSadaf Ebrahimi
5884*b7893ccfSSadaf Ebrahimi size_t GetBlockCount() const { return m_Blocks.size(); }
5885*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5886*b7893ccfSSadaf Ebrahimi size_t CalcAllocationCount() const;
5887*b7893ccfSSadaf Ebrahimi bool IsBufferImageGranularityConflictPossible() const;
5888*b7893ccfSSadaf Ebrahimi
5889*b7893ccfSSadaf Ebrahimi private:
5890*b7893ccfSSadaf Ebrahimi friend class VmaDefragmentationAlgorithm_Generic;
5891*b7893ccfSSadaf Ebrahimi
5892*b7893ccfSSadaf Ebrahimi const VmaAllocator m_hAllocator;
5893*b7893ccfSSadaf Ebrahimi const uint32_t m_MemoryTypeIndex;
5894*b7893ccfSSadaf Ebrahimi const VkDeviceSize m_PreferredBlockSize;
5895*b7893ccfSSadaf Ebrahimi const size_t m_MinBlockCount;
5896*b7893ccfSSadaf Ebrahimi const size_t m_MaxBlockCount;
5897*b7893ccfSSadaf Ebrahimi const VkDeviceSize m_BufferImageGranularity;
5898*b7893ccfSSadaf Ebrahimi const uint32_t m_FrameInUseCount;
5899*b7893ccfSSadaf Ebrahimi const bool m_IsCustomPool;
5900*b7893ccfSSadaf Ebrahimi const bool m_ExplicitBlockSize;
5901*b7893ccfSSadaf Ebrahimi const uint32_t m_Algorithm;
5902*b7893ccfSSadaf Ebrahimi /* There can be at most one memory block that is completely empty - a
5903*b7893ccfSSadaf Ebrahimi hysteresis to avoid the pessimistic case of alternating creation and destruction
5904*b7893ccfSSadaf Ebrahimi of a VkDeviceMemory. */
5905*b7893ccfSSadaf Ebrahimi bool m_HasEmptyBlock;
5906*b7893ccfSSadaf Ebrahimi VMA_RW_MUTEX m_Mutex;
5907*b7893ccfSSadaf Ebrahimi // Incrementally sorted by sumFreeSize, ascending.
5908*b7893ccfSSadaf Ebrahimi VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5909*b7893ccfSSadaf Ebrahimi uint32_t m_NextBlockId;
5910*b7893ccfSSadaf Ebrahimi
5911*b7893ccfSSadaf Ebrahimi VkDeviceSize CalcMaxBlockSize() const;
5912*b7893ccfSSadaf Ebrahimi
5913*b7893ccfSSadaf Ebrahimi // Finds and removes given block from vector.
5914*b7893ccfSSadaf Ebrahimi void Remove(VmaDeviceMemoryBlock* pBlock);
5915*b7893ccfSSadaf Ebrahimi
5916*b7893ccfSSadaf Ebrahimi // Performs a single step in sorting m_Blocks. They may not be fully sorted
5917*b7893ccfSSadaf Ebrahimi // after this call.
5918*b7893ccfSSadaf Ebrahimi void IncrementallySortBlocks();
5919*b7893ccfSSadaf Ebrahimi
5920*b7893ccfSSadaf Ebrahimi VkResult AllocatePage(
5921*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
5922*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5923*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
5924*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
5925*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
5926*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
5927*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation);
5928*b7893ccfSSadaf Ebrahimi
5929*b7893ccfSSadaf Ebrahimi // To be used only without CAN_MAKE_OTHER_LOST flag.
5930*b7893ccfSSadaf Ebrahimi VkResult AllocateFromBlock(
5931*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock,
5932*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
5933*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
5934*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
5935*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
5936*b7893ccfSSadaf Ebrahimi VmaAllocationCreateFlags allocFlags,
5937*b7893ccfSSadaf Ebrahimi void* pUserData,
5938*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
5939*b7893ccfSSadaf Ebrahimi uint32_t strategy,
5940*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation);
5941*b7893ccfSSadaf Ebrahimi
5942*b7893ccfSSadaf Ebrahimi VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5943*b7893ccfSSadaf Ebrahimi
5944*b7893ccfSSadaf Ebrahimi // Saves result to pCtx->res.
5945*b7893ccfSSadaf Ebrahimi void ApplyDefragmentationMovesCpu(
5946*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pDefragCtx,
5947*b7893ccfSSadaf Ebrahimi const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5948*b7893ccfSSadaf Ebrahimi // Saves result to pCtx->res.
5949*b7893ccfSSadaf Ebrahimi void ApplyDefragmentationMovesGpu(
5950*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pDefragCtx,
5951*b7893ccfSSadaf Ebrahimi const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5952*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer);
5953*b7893ccfSSadaf Ebrahimi
5954*b7893ccfSSadaf Ebrahimi /*
5955*b7893ccfSSadaf Ebrahimi Used during defragmentation. pDefragmentationStats is optional. It's in/out
5956*b7893ccfSSadaf Ebrahimi - updated with new data.
5957*b7893ccfSSadaf Ebrahimi */
5958*b7893ccfSSadaf Ebrahimi void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5959*b7893ccfSSadaf Ebrahimi };
5960*b7893ccfSSadaf Ebrahimi
5961*b7893ccfSSadaf Ebrahimi struct VmaPool_T
5962*b7893ccfSSadaf Ebrahimi {
5963*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaPool_T)
5964*b7893ccfSSadaf Ebrahimi public:
5965*b7893ccfSSadaf Ebrahimi VmaBlockVector m_BlockVector;
5966*b7893ccfSSadaf Ebrahimi
5967*b7893ccfSSadaf Ebrahimi VmaPool_T(
5968*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
5969*b7893ccfSSadaf Ebrahimi const VmaPoolCreateInfo& createInfo,
5970*b7893ccfSSadaf Ebrahimi VkDeviceSize preferredBlockSize);
5971*b7893ccfSSadaf Ebrahimi ~VmaPool_T();
5972*b7893ccfSSadaf Ebrahimi
5973*b7893ccfSSadaf Ebrahimi uint32_t GetId() const { return m_Id; }
5974*b7893ccfSSadaf Ebrahimi void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5975*b7893ccfSSadaf Ebrahimi
5976*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
5977*b7893ccfSSadaf Ebrahimi //void PrintDetailedMap(class VmaStringBuilder& sb);
5978*b7893ccfSSadaf Ebrahimi #endif
5979*b7893ccfSSadaf Ebrahimi
5980*b7893ccfSSadaf Ebrahimi private:
5981*b7893ccfSSadaf Ebrahimi uint32_t m_Id;
5982*b7893ccfSSadaf Ebrahimi };
5983*b7893ccfSSadaf Ebrahimi
5984*b7893ccfSSadaf Ebrahimi /*
5985*b7893ccfSSadaf Ebrahimi Performs defragmentation:
5986*b7893ccfSSadaf Ebrahimi
5987*b7893ccfSSadaf Ebrahimi - Updates `pBlockVector->m_pMetadata`.
5988*b7893ccfSSadaf Ebrahimi - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5989*b7893ccfSSadaf Ebrahimi - Does not move actual data, only returns requested moves as `moves`.
5990*b7893ccfSSadaf Ebrahimi */
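/*
Conceptual sketch of how the returned moves are consumed (the MapBlock helper named here is
hypothetical and stands in for the real mapping logic; the actual work happens in
VmaBlockVector::ApplyDefragmentationMovesCpu/Gpu declared above). Each entry of `moves` is a
plain VmaDefragmentationMove record, and a CPU-side consumer does roughly:

    for(size_t i = 0; i < moves.size(); ++i)
    {
        const VmaDefragmentationMove& move = moves[i];
        void* srcPtr = MapBlock(move.srcBlockIndex); // map source block (hypothetical helper)
        void* dstPtr = MapBlock(move.dstBlockIndex); // map destination block (hypothetical helper)
        memcpy((char*)dstPtr + move.dstOffset,
               (char*)srcPtr + move.srcOffset,
               (size_t)move.size);
    }

The GPU path records equivalent vkCmdCopyBuffer commands into the provided command buffer
instead of copying on the CPU.
*/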
5991*b7893ccfSSadaf Ebrahimi class VmaDefragmentationAlgorithm
5992*b7893ccfSSadaf Ebrahimi {
5993*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5994*b7893ccfSSadaf Ebrahimi public:
5995*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm(
5996*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
5997*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
5998*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex) :
5999*b7893ccfSSadaf Ebrahimi m_hAllocator(hAllocator),
6000*b7893ccfSSadaf Ebrahimi m_pBlockVector(pBlockVector),
6001*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex(currentFrameIndex)
6002*b7893ccfSSadaf Ebrahimi {
6003*b7893ccfSSadaf Ebrahimi }
6004*b7893ccfSSadaf Ebrahimi virtual ~VmaDefragmentationAlgorithm()
6005*b7893ccfSSadaf Ebrahimi {
6006*b7893ccfSSadaf Ebrahimi }
6007*b7893ccfSSadaf Ebrahimi
6008*b7893ccfSSadaf Ebrahimi virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6009*b7893ccfSSadaf Ebrahimi virtual void AddAll() = 0;
6010*b7893ccfSSadaf Ebrahimi
6011*b7893ccfSSadaf Ebrahimi virtual VkResult Defragment(
6012*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6013*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
6014*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove) = 0;
6015*b7893ccfSSadaf Ebrahimi
6016*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetBytesMoved() const = 0;
6017*b7893ccfSSadaf Ebrahimi virtual uint32_t GetAllocationsMoved() const = 0;
6018*b7893ccfSSadaf Ebrahimi
6019*b7893ccfSSadaf Ebrahimi protected:
6020*b7893ccfSSadaf Ebrahimi VmaAllocator const m_hAllocator;
6021*b7893ccfSSadaf Ebrahimi VmaBlockVector* const m_pBlockVector;
6022*b7893ccfSSadaf Ebrahimi const uint32_t m_CurrentFrameIndex;
6023*b7893ccfSSadaf Ebrahimi
6024*b7893ccfSSadaf Ebrahimi struct AllocationInfo
6025*b7893ccfSSadaf Ebrahimi {
6026*b7893ccfSSadaf Ebrahimi VmaAllocation m_hAllocation;
6027*b7893ccfSSadaf Ebrahimi VkBool32* m_pChanged;
6028*b7893ccfSSadaf Ebrahimi
6029*b7893ccfSSadaf Ebrahimi AllocationInfo() :
6030*b7893ccfSSadaf Ebrahimi m_hAllocation(VK_NULL_HANDLE),
6031*b7893ccfSSadaf Ebrahimi m_pChanged(VMA_NULL)
6032*b7893ccfSSadaf Ebrahimi {
6033*b7893ccfSSadaf Ebrahimi }
6034*b7893ccfSSadaf Ebrahimi AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6035*b7893ccfSSadaf Ebrahimi m_hAllocation(hAlloc),
6036*b7893ccfSSadaf Ebrahimi m_pChanged(pChanged)
6037*b7893ccfSSadaf Ebrahimi {
6038*b7893ccfSSadaf Ebrahimi }
6039*b7893ccfSSadaf Ebrahimi };
6040*b7893ccfSSadaf Ebrahimi };
6041*b7893ccfSSadaf Ebrahimi
6042*b7893ccfSSadaf Ebrahimi class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6043*b7893ccfSSadaf Ebrahimi {
6044*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6045*b7893ccfSSadaf Ebrahimi public:
6046*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Generic(
6047*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
6048*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
6049*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
6050*b7893ccfSSadaf Ebrahimi bool overlappingMoveSupported);
6051*b7893ccfSSadaf Ebrahimi virtual ~VmaDefragmentationAlgorithm_Generic();
6052*b7893ccfSSadaf Ebrahimi
6053*b7893ccfSSadaf Ebrahimi virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6054*b7893ccfSSadaf Ebrahimi virtual void AddAll() { m_AllAllocations = true; }
6055*b7893ccfSSadaf Ebrahimi
6056*b7893ccfSSadaf Ebrahimi virtual VkResult Defragment(
6057*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6058*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
6059*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove);
6060*b7893ccfSSadaf Ebrahimi
6061*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6062*b7893ccfSSadaf Ebrahimi virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6063*b7893ccfSSadaf Ebrahimi
6064*b7893ccfSSadaf Ebrahimi private:
6065*b7893ccfSSadaf Ebrahimi uint32_t m_AllocationCount;
6066*b7893ccfSSadaf Ebrahimi bool m_AllAllocations;
6067*b7893ccfSSadaf Ebrahimi
6068*b7893ccfSSadaf Ebrahimi VkDeviceSize m_BytesMoved;
6069*b7893ccfSSadaf Ebrahimi uint32_t m_AllocationsMoved;
6070*b7893ccfSSadaf Ebrahimi
6071*b7893ccfSSadaf Ebrahimi struct AllocationInfoSizeGreater
6072*b7893ccfSSadaf Ebrahimi {
6073*b7893ccfSSadaf Ebrahimi bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6074*b7893ccfSSadaf Ebrahimi {
6075*b7893ccfSSadaf Ebrahimi return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6076*b7893ccfSSadaf Ebrahimi }
6077*b7893ccfSSadaf Ebrahimi };
6078*b7893ccfSSadaf Ebrahimi
6079*b7893ccfSSadaf Ebrahimi struct AllocationInfoOffsetGreater
6080*b7893ccfSSadaf Ebrahimi {
6081*b7893ccfSSadaf Ebrahimi bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6082*b7893ccfSSadaf Ebrahimi {
6083*b7893ccfSSadaf Ebrahimi return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6084*b7893ccfSSadaf Ebrahimi }
6085*b7893ccfSSadaf Ebrahimi };
6086*b7893ccfSSadaf Ebrahimi
6087*b7893ccfSSadaf Ebrahimi struct BlockInfo
6088*b7893ccfSSadaf Ebrahimi {
6089*b7893ccfSSadaf Ebrahimi size_t m_OriginalBlockIndex;
6090*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* m_pBlock;
6091*b7893ccfSSadaf Ebrahimi bool m_HasNonMovableAllocations;
6092*b7893ccfSSadaf Ebrahimi VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6093*b7893ccfSSadaf Ebrahimi
6094*b7893ccfSSadaf Ebrahimi BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6095*b7893ccfSSadaf Ebrahimi m_OriginalBlockIndex(SIZE_MAX),
6096*b7893ccfSSadaf Ebrahimi m_pBlock(VMA_NULL),
6097*b7893ccfSSadaf Ebrahimi m_HasNonMovableAllocations(true),
6098*b7893ccfSSadaf Ebrahimi m_Allocations(pAllocationCallbacks)
6099*b7893ccfSSadaf Ebrahimi {
6100*b7893ccfSSadaf Ebrahimi }
6101*b7893ccfSSadaf Ebrahimi
6102*b7893ccfSSadaf Ebrahimi void CalcHasNonMovableAllocations()
6103*b7893ccfSSadaf Ebrahimi {
6104*b7893ccfSSadaf Ebrahimi const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6105*b7893ccfSSadaf Ebrahimi const size_t defragmentAllocCount = m_Allocations.size();
6106*b7893ccfSSadaf Ebrahimi m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6107*b7893ccfSSadaf Ebrahimi }
6108*b7893ccfSSadaf Ebrahimi
6109*b7893ccfSSadaf Ebrahimi void SortAllocationsBySizeDescending()
6110*b7893ccfSSadaf Ebrahimi {
6111*b7893ccfSSadaf Ebrahimi VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6112*b7893ccfSSadaf Ebrahimi }
6113*b7893ccfSSadaf Ebrahimi
6114*b7893ccfSSadaf Ebrahimi void SortAllocationsByOffsetDescending()
6115*b7893ccfSSadaf Ebrahimi {
6116*b7893ccfSSadaf Ebrahimi VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6117*b7893ccfSSadaf Ebrahimi }
6118*b7893ccfSSadaf Ebrahimi };
6119*b7893ccfSSadaf Ebrahimi
6120*b7893ccfSSadaf Ebrahimi struct BlockPointerLess
6121*b7893ccfSSadaf Ebrahimi {
6122*b7893ccfSSadaf Ebrahimi bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6123*b7893ccfSSadaf Ebrahimi {
6124*b7893ccfSSadaf Ebrahimi return pLhsBlockInfo->m_pBlock < pRhsBlock;
6125*b7893ccfSSadaf Ebrahimi }
6126*b7893ccfSSadaf Ebrahimi bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6127*b7893ccfSSadaf Ebrahimi {
6128*b7893ccfSSadaf Ebrahimi return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6129*b7893ccfSSadaf Ebrahimi }
6130*b7893ccfSSadaf Ebrahimi };
6131*b7893ccfSSadaf Ebrahimi
6132*b7893ccfSSadaf Ebrahimi // 1. Blocks with some non-movable allocations go first.
6133*b7893ccfSSadaf Ebrahimi // 2. Blocks with smaller sumFreeSize go first.
6134*b7893ccfSSadaf Ebrahimi struct BlockInfoCompareMoveDestination
6135*b7893ccfSSadaf Ebrahimi {
6136*b7893ccfSSadaf Ebrahimi bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6137*b7893ccfSSadaf Ebrahimi {
6138*b7893ccfSSadaf Ebrahimi if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6139*b7893ccfSSadaf Ebrahimi {
6140*b7893ccfSSadaf Ebrahimi return true;
6141*b7893ccfSSadaf Ebrahimi }
6142*b7893ccfSSadaf Ebrahimi if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6143*b7893ccfSSadaf Ebrahimi {
6144*b7893ccfSSadaf Ebrahimi return false;
6145*b7893ccfSSadaf Ebrahimi }
6146*b7893ccfSSadaf Ebrahimi if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6147*b7893ccfSSadaf Ebrahimi {
6148*b7893ccfSSadaf Ebrahimi return true;
6149*b7893ccfSSadaf Ebrahimi }
6150*b7893ccfSSadaf Ebrahimi return false;
6151*b7893ccfSSadaf Ebrahimi }
6152*b7893ccfSSadaf Ebrahimi };
6153*b7893ccfSSadaf Ebrahimi
6154*b7893ccfSSadaf Ebrahimi typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6155*b7893ccfSSadaf Ebrahimi BlockInfoVector m_Blocks;
6156*b7893ccfSSadaf Ebrahimi
6157*b7893ccfSSadaf Ebrahimi VkResult DefragmentRound(
6158*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6159*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
6160*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove);
6161*b7893ccfSSadaf Ebrahimi
6162*b7893ccfSSadaf Ebrahimi size_t CalcBlocksWithNonMovableCount() const;
6163*b7893ccfSSadaf Ebrahimi
6164*b7893ccfSSadaf Ebrahimi static bool MoveMakesSense(
6165*b7893ccfSSadaf Ebrahimi size_t dstBlockIndex, VkDeviceSize dstOffset,
6166*b7893ccfSSadaf Ebrahimi size_t srcBlockIndex, VkDeviceSize srcOffset);
6167*b7893ccfSSadaf Ebrahimi };
6168*b7893ccfSSadaf Ebrahimi
6169*b7893ccfSSadaf Ebrahimi class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6170*b7893ccfSSadaf Ebrahimi {
6171*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6172*b7893ccfSSadaf Ebrahimi public:
6173*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Fast(
6174*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
6175*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
6176*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
6177*b7893ccfSSadaf Ebrahimi bool overlappingMoveSupported);
6178*b7893ccfSSadaf Ebrahimi virtual ~VmaDefragmentationAlgorithm_Fast();
6179*b7893ccfSSadaf Ebrahimi
6180*b7893ccfSSadaf Ebrahimi virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6181*b7893ccfSSadaf Ebrahimi virtual void AddAll() { m_AllAllocations = true; }
6182*b7893ccfSSadaf Ebrahimi
6183*b7893ccfSSadaf Ebrahimi virtual VkResult Defragment(
6184*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6185*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
6186*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove);
6187*b7893ccfSSadaf Ebrahimi
6188*b7893ccfSSadaf Ebrahimi virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6189*b7893ccfSSadaf Ebrahimi virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6190*b7893ccfSSadaf Ebrahimi
6191*b7893ccfSSadaf Ebrahimi private:
6192*b7893ccfSSadaf Ebrahimi struct BlockInfo
6193*b7893ccfSSadaf Ebrahimi {
6194*b7893ccfSSadaf Ebrahimi size_t origBlockIndex;
6195*b7893ccfSSadaf Ebrahimi };
6196*b7893ccfSSadaf Ebrahimi
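// FreeSpaceDatabase keeps a small, fixed-size set (MAX_COUNT entries) of free ranges found
// while sweeping blocks during fast defragmentation. Register() records a range, ignoring
// anything below VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER and, when the set is full,
// overwriting the smallest stored entry that is smaller than the new range. Fetch() returns
// an aligned destination offset inside a stored range that can hold the requested size,
// preferring the range that leaves the most space afterwards, then shrinks or invalidates
// the chosen entry.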
6197*b7893ccfSSadaf Ebrahimi class FreeSpaceDatabase
6198*b7893ccfSSadaf Ebrahimi {
6199*b7893ccfSSadaf Ebrahimi public:
6200*b7893ccfSSadaf Ebrahimi FreeSpaceDatabase()
6201*b7893ccfSSadaf Ebrahimi {
6202*b7893ccfSSadaf Ebrahimi FreeSpace s = {};
6203*b7893ccfSSadaf Ebrahimi s.blockInfoIndex = SIZE_MAX;
6204*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < MAX_COUNT; ++i)
6205*b7893ccfSSadaf Ebrahimi {
6206*b7893ccfSSadaf Ebrahimi m_FreeSpaces[i] = s;
6207*b7893ccfSSadaf Ebrahimi }
6208*b7893ccfSSadaf Ebrahimi }
6209*b7893ccfSSadaf Ebrahimi
6210*b7893ccfSSadaf Ebrahimi void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6211*b7893ccfSSadaf Ebrahimi {
6212*b7893ccfSSadaf Ebrahimi if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6213*b7893ccfSSadaf Ebrahimi {
6214*b7893ccfSSadaf Ebrahimi return;
6215*b7893ccfSSadaf Ebrahimi }
6216*b7893ccfSSadaf Ebrahimi
6217*b7893ccfSSadaf Ebrahimi // Find first invalid or the smallest structure.
6218*b7893ccfSSadaf Ebrahimi size_t bestIndex = SIZE_MAX;
6219*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < MAX_COUNT; ++i)
6220*b7893ccfSSadaf Ebrahimi {
6221*b7893ccfSSadaf Ebrahimi // Empty structure.
6222*b7893ccfSSadaf Ebrahimi if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6223*b7893ccfSSadaf Ebrahimi {
6224*b7893ccfSSadaf Ebrahimi bestIndex = i;
6225*b7893ccfSSadaf Ebrahimi break;
6226*b7893ccfSSadaf Ebrahimi }
6227*b7893ccfSSadaf Ebrahimi if(m_FreeSpaces[i].size < size &&
6228*b7893ccfSSadaf Ebrahimi (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6229*b7893ccfSSadaf Ebrahimi {
6230*b7893ccfSSadaf Ebrahimi bestIndex = i;
6231*b7893ccfSSadaf Ebrahimi }
6232*b7893ccfSSadaf Ebrahimi }
6233*b7893ccfSSadaf Ebrahimi
6234*b7893ccfSSadaf Ebrahimi if(bestIndex != SIZE_MAX)
6235*b7893ccfSSadaf Ebrahimi {
6236*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6237*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].offset = offset;
6238*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].size = size;
6239*b7893ccfSSadaf Ebrahimi }
6240*b7893ccfSSadaf Ebrahimi }
6241*b7893ccfSSadaf Ebrahimi
6242*b7893ccfSSadaf Ebrahimi bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6243*b7893ccfSSadaf Ebrahimi size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6244*b7893ccfSSadaf Ebrahimi {
6245*b7893ccfSSadaf Ebrahimi size_t bestIndex = SIZE_MAX;
6246*b7893ccfSSadaf Ebrahimi VkDeviceSize bestFreeSpaceAfter = 0;
6247*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < MAX_COUNT; ++i)
6248*b7893ccfSSadaf Ebrahimi {
6249*b7893ccfSSadaf Ebrahimi // Structure is valid.
6250*b7893ccfSSadaf Ebrahimi if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6251*b7893ccfSSadaf Ebrahimi {
6252*b7893ccfSSadaf Ebrahimi const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6253*b7893ccfSSadaf Ebrahimi // Allocation fits into this structure.
6254*b7893ccfSSadaf Ebrahimi if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6255*b7893ccfSSadaf Ebrahimi {
6256*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6257*b7893ccfSSadaf Ebrahimi (dstOffset + size);
6258*b7893ccfSSadaf Ebrahimi if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6259*b7893ccfSSadaf Ebrahimi {
6260*b7893ccfSSadaf Ebrahimi bestIndex = i;
6261*b7893ccfSSadaf Ebrahimi bestFreeSpaceAfter = freeSpaceAfter;
6262*b7893ccfSSadaf Ebrahimi }
6263*b7893ccfSSadaf Ebrahimi }
6264*b7893ccfSSadaf Ebrahimi }
6265*b7893ccfSSadaf Ebrahimi }
6266*b7893ccfSSadaf Ebrahimi
6267*b7893ccfSSadaf Ebrahimi if(bestIndex != SIZE_MAX)
6268*b7893ccfSSadaf Ebrahimi {
6269*b7893ccfSSadaf Ebrahimi outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6270*b7893ccfSSadaf Ebrahimi outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6271*b7893ccfSSadaf Ebrahimi
6272*b7893ccfSSadaf Ebrahimi if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6273*b7893ccfSSadaf Ebrahimi {
6274*b7893ccfSSadaf Ebrahimi // Leave this structure for remaining empty space.
6275*b7893ccfSSadaf Ebrahimi const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6276*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6277*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6278*b7893ccfSSadaf Ebrahimi }
6279*b7893ccfSSadaf Ebrahimi else
6280*b7893ccfSSadaf Ebrahimi {
6281*b7893ccfSSadaf Ebrahimi // This structure becomes invalid.
6282*b7893ccfSSadaf Ebrahimi m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6283*b7893ccfSSadaf Ebrahimi }
6284*b7893ccfSSadaf Ebrahimi
6285*b7893ccfSSadaf Ebrahimi return true;
6286*b7893ccfSSadaf Ebrahimi }
6287*b7893ccfSSadaf Ebrahimi
6288*b7893ccfSSadaf Ebrahimi return false;
6289*b7893ccfSSadaf Ebrahimi }
6290*b7893ccfSSadaf Ebrahimi
6291*b7893ccfSSadaf Ebrahimi private:
6292*b7893ccfSSadaf Ebrahimi static const size_t MAX_COUNT = 4;
6293*b7893ccfSSadaf Ebrahimi
6294*b7893ccfSSadaf Ebrahimi struct FreeSpace
6295*b7893ccfSSadaf Ebrahimi {
6296*b7893ccfSSadaf Ebrahimi size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6297*b7893ccfSSadaf Ebrahimi VkDeviceSize offset;
6298*b7893ccfSSadaf Ebrahimi VkDeviceSize size;
6299*b7893ccfSSadaf Ebrahimi } m_FreeSpaces[MAX_COUNT];
6300*b7893ccfSSadaf Ebrahimi };
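// Illustrative sketch (not part of the allocator) of the fit test performed by Fetch()
// above, using made-up numbers: a registered free range at offset 96 with size 160, and
// a request of size 128 with alignment 64.
// \code
// VkDeviceSize dstOffset      = VmaAlignUp((VkDeviceSize)96, (VkDeviceSize)64); // == 128
// bool         fits           = dstOffset + 128 <= 96 + 160;                    // 256 <= 256 -> true
// VkDeviceSize freeSpaceAfter = (96 + 160) - (dstOffset + 128);                 // == 0
// // Nothing usable remains after the allocation, so the entry would then be
// // marked invalid (blockInfoIndex = SIZE_MAX).
// \endcode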
6301*b7893ccfSSadaf Ebrahimi
6302*b7893ccfSSadaf Ebrahimi const bool m_OverlappingMoveSupported;
6303*b7893ccfSSadaf Ebrahimi
6304*b7893ccfSSadaf Ebrahimi uint32_t m_AllocationCount;
6305*b7893ccfSSadaf Ebrahimi bool m_AllAllocations;
6306*b7893ccfSSadaf Ebrahimi
6307*b7893ccfSSadaf Ebrahimi VkDeviceSize m_BytesMoved;
6308*b7893ccfSSadaf Ebrahimi uint32_t m_AllocationsMoved;
6309*b7893ccfSSadaf Ebrahimi
6310*b7893ccfSSadaf Ebrahimi VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6311*b7893ccfSSadaf Ebrahimi
6312*b7893ccfSSadaf Ebrahimi void PreprocessMetadata();
6313*b7893ccfSSadaf Ebrahimi void PostprocessMetadata();
6314*b7893ccfSSadaf Ebrahimi void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6315*b7893ccfSSadaf Ebrahimi };
6316*b7893ccfSSadaf Ebrahimi
6317*b7893ccfSSadaf Ebrahimi struct VmaBlockDefragmentationContext
6318*b7893ccfSSadaf Ebrahimi {
6319*b7893ccfSSadaf Ebrahimi enum BLOCK_FLAG
6320*b7893ccfSSadaf Ebrahimi {
6321*b7893ccfSSadaf Ebrahimi BLOCK_FLAG_USED = 0x00000001,
6322*b7893ccfSSadaf Ebrahimi };
6323*b7893ccfSSadaf Ebrahimi uint32_t flags;
6324*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer;
6325*b7893ccfSSadaf Ebrahimi
6326*b7893ccfSSadaf Ebrahimi VmaBlockDefragmentationContext() :
6327*b7893ccfSSadaf Ebrahimi flags(0),
6328*b7893ccfSSadaf Ebrahimi hBuffer(VK_NULL_HANDLE)
6329*b7893ccfSSadaf Ebrahimi {
6330*b7893ccfSSadaf Ebrahimi }
6331*b7893ccfSSadaf Ebrahimi };
6332*b7893ccfSSadaf Ebrahimi
6333*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext
6334*b7893ccfSSadaf Ebrahimi {
6335*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6336*b7893ccfSSadaf Ebrahimi public:
6337*b7893ccfSSadaf Ebrahimi VkResult res;
6338*b7893ccfSSadaf Ebrahimi bool mutexLocked;
6339*b7893ccfSSadaf Ebrahimi VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6340*b7893ccfSSadaf Ebrahimi
6341*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext(
6342*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
6343*b7893ccfSSadaf Ebrahimi VmaPool hCustomPool, // Optional.
6344*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
6345*b7893ccfSSadaf Ebrahimi uint32_t currFrameIndex,
6346*b7893ccfSSadaf Ebrahimi uint32_t flags);
6347*b7893ccfSSadaf Ebrahimi ~VmaBlockVectorDefragmentationContext();
6348*b7893ccfSSadaf Ebrahimi
6349*b7893ccfSSadaf Ebrahimi VmaPool GetCustomPool() const { return m_hCustomPool; }
6350*b7893ccfSSadaf Ebrahimi VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6351*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6352*b7893ccfSSadaf Ebrahimi
6353*b7893ccfSSadaf Ebrahimi void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6354*b7893ccfSSadaf Ebrahimi void AddAll() { m_AllAllocations = true; }
6355*b7893ccfSSadaf Ebrahimi
6356*b7893ccfSSadaf Ebrahimi void Begin(bool overlappingMoveSupported);
6357*b7893ccfSSadaf Ebrahimi
6358*b7893ccfSSadaf Ebrahimi private:
6359*b7893ccfSSadaf Ebrahimi const VmaAllocator m_hAllocator;
6360*b7893ccfSSadaf Ebrahimi // Null if not from custom pool.
6361*b7893ccfSSadaf Ebrahimi const VmaPool m_hCustomPool;
6362*b7893ccfSSadaf Ebrahimi // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6363*b7893ccfSSadaf Ebrahimi VmaBlockVector* const m_pBlockVector;
6364*b7893ccfSSadaf Ebrahimi const uint32_t m_CurrFrameIndex;
6365*b7893ccfSSadaf Ebrahimi //const uint32_t m_AlgorithmFlags;
6366*b7893ccfSSadaf Ebrahimi // Owner of this object.
6367*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm* m_pAlgorithm;
6368*b7893ccfSSadaf Ebrahimi
6369*b7893ccfSSadaf Ebrahimi struct AllocInfo
6370*b7893ccfSSadaf Ebrahimi {
6371*b7893ccfSSadaf Ebrahimi VmaAllocation hAlloc;
6372*b7893ccfSSadaf Ebrahimi VkBool32* pChanged;
6373*b7893ccfSSadaf Ebrahimi };
6374*b7893ccfSSadaf Ebrahimi // Used between constructor and Begin.
6375*b7893ccfSSadaf Ebrahimi VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6376*b7893ccfSSadaf Ebrahimi bool m_AllAllocations;
6377*b7893ccfSSadaf Ebrahimi };
6378*b7893ccfSSadaf Ebrahimi
6379*b7893ccfSSadaf Ebrahimi struct VmaDefragmentationContext_T
6380*b7893ccfSSadaf Ebrahimi {
6381*b7893ccfSSadaf Ebrahimi private:
6382*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6383*b7893ccfSSadaf Ebrahimi public:
6384*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext_T(
6385*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
6386*b7893ccfSSadaf Ebrahimi uint32_t currFrameIndex,
6387*b7893ccfSSadaf Ebrahimi uint32_t flags,
6388*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats);
6389*b7893ccfSSadaf Ebrahimi ~VmaDefragmentationContext_T();
6390*b7893ccfSSadaf Ebrahimi
6391*b7893ccfSSadaf Ebrahimi void AddPools(uint32_t poolCount, VmaPool* pPools);
6392*b7893ccfSSadaf Ebrahimi void AddAllocations(
6393*b7893ccfSSadaf Ebrahimi uint32_t allocationCount,
6394*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
6395*b7893ccfSSadaf Ebrahimi VkBool32* pAllocationsChanged);
6396*b7893ccfSSadaf Ebrahimi
6397*b7893ccfSSadaf Ebrahimi /*
6398*b7893ccfSSadaf Ebrahimi Returns:
6399*b7893ccfSSadaf Ebrahimi - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6400*b7893ccfSSadaf Ebrahimi - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6401*b7893ccfSSadaf Ebrahimi - Negative value if an error occurred and the object can be destroyed immediately.
6402*b7893ccfSSadaf Ebrahimi */
6403*b7893ccfSSadaf Ebrahimi VkResult Defragment(
6404*b7893ccfSSadaf Ebrahimi VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6405*b7893ccfSSadaf Ebrahimi VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6406*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6407*b7893ccfSSadaf Ebrahimi
6408*b7893ccfSSadaf Ebrahimi private:
6409*b7893ccfSSadaf Ebrahimi const VmaAllocator m_hAllocator;
6410*b7893ccfSSadaf Ebrahimi const uint32_t m_CurrFrameIndex;
6411*b7893ccfSSadaf Ebrahimi const uint32_t m_Flags;
6412*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* const m_pStats;
6413*b7893ccfSSadaf Ebrahimi // Owner of these objects.
6414*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6415*b7893ccfSSadaf Ebrahimi // Owner of these objects.
6416*b7893ccfSSadaf Ebrahimi VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6417*b7893ccfSSadaf Ebrahimi };
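// Illustrative sketch of how the public API drives this context. The members of
// VmaDefragmentationInfo2 used below are assumed from the library documentation for
// this version, so treat this as a sketch rather than canonical usage.
// \code
// VmaDefragmentationInfo2 defragInfo = {};
// defragInfo.allocationCount = allocCount;          // allocations selected by the caller
// defragInfo.pAllocations = allocs;
// defragInfo.pAllocationsChanged = allocsChanged;   // optional per-allocation VkBool32 output
// defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
// defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
// VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
// VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// // VK_NOT_READY means the context must stay alive until vmaDefragmentationEnd().
// vmaDefragmentationEnd(allocator, defragCtx);
// \endcode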
6418*b7893ccfSSadaf Ebrahimi
6419*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
6420*b7893ccfSSadaf Ebrahimi
6421*b7893ccfSSadaf Ebrahimi class VmaRecorder
6422*b7893ccfSSadaf Ebrahimi {
6423*b7893ccfSSadaf Ebrahimi public:
6424*b7893ccfSSadaf Ebrahimi VmaRecorder();
6425*b7893ccfSSadaf Ebrahimi VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6426*b7893ccfSSadaf Ebrahimi void WriteConfiguration(
6427*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceProperties& devProps,
6428*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceMemoryProperties& memProps,
6429*b7893ccfSSadaf Ebrahimi bool dedicatedAllocationExtensionEnabled);
6430*b7893ccfSSadaf Ebrahimi ~VmaRecorder();
6431*b7893ccfSSadaf Ebrahimi
6432*b7893ccfSSadaf Ebrahimi void RecordCreateAllocator(uint32_t frameIndex);
6433*b7893ccfSSadaf Ebrahimi void RecordDestroyAllocator(uint32_t frameIndex);
6434*b7893ccfSSadaf Ebrahimi void RecordCreatePool(uint32_t frameIndex,
6435*b7893ccfSSadaf Ebrahimi const VmaPoolCreateInfo& createInfo,
6436*b7893ccfSSadaf Ebrahimi VmaPool pool);
6437*b7893ccfSSadaf Ebrahimi void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6438*b7893ccfSSadaf Ebrahimi void RecordAllocateMemory(uint32_t frameIndex,
6439*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
6440*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6441*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6442*b7893ccfSSadaf Ebrahimi void RecordAllocateMemoryPages(uint32_t frameIndex,
6443*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
6444*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6445*b7893ccfSSadaf Ebrahimi uint64_t allocationCount,
6446*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations);
6447*b7893ccfSSadaf Ebrahimi void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6448*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
6449*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
6450*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
6451*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6452*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6453*b7893ccfSSadaf Ebrahimi void RecordAllocateMemoryForImage(uint32_t frameIndex,
6454*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
6455*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
6456*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
6457*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6458*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6459*b7893ccfSSadaf Ebrahimi void RecordFreeMemory(uint32_t frameIndex,
6460*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6461*b7893ccfSSadaf Ebrahimi void RecordFreeMemoryPages(uint32_t frameIndex,
6462*b7893ccfSSadaf Ebrahimi uint64_t allocationCount,
6463*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations);
6464*b7893ccfSSadaf Ebrahimi void RecordResizeAllocation(
6465*b7893ccfSSadaf Ebrahimi uint32_t frameIndex,
6466*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
6467*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize);
6468*b7893ccfSSadaf Ebrahimi void RecordSetAllocationUserData(uint32_t frameIndex,
6469*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
6470*b7893ccfSSadaf Ebrahimi const void* pUserData);
6471*b7893ccfSSadaf Ebrahimi void RecordCreateLostAllocation(uint32_t frameIndex,
6472*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6473*b7893ccfSSadaf Ebrahimi void RecordMapMemory(uint32_t frameIndex,
6474*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6475*b7893ccfSSadaf Ebrahimi void RecordUnmapMemory(uint32_t frameIndex,
6476*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6477*b7893ccfSSadaf Ebrahimi void RecordFlushAllocation(uint32_t frameIndex,
6478*b7893ccfSSadaf Ebrahimi VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6479*b7893ccfSSadaf Ebrahimi void RecordInvalidateAllocation(uint32_t frameIndex,
6480*b7893ccfSSadaf Ebrahimi VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6481*b7893ccfSSadaf Ebrahimi void RecordCreateBuffer(uint32_t frameIndex,
6482*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo& bufCreateInfo,
6483*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& allocCreateInfo,
6484*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6485*b7893ccfSSadaf Ebrahimi void RecordCreateImage(uint32_t frameIndex,
6486*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo& imageCreateInfo,
6487*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& allocCreateInfo,
6488*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6489*b7893ccfSSadaf Ebrahimi void RecordDestroyBuffer(uint32_t frameIndex,
6490*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6491*b7893ccfSSadaf Ebrahimi void RecordDestroyImage(uint32_t frameIndex,
6492*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6493*b7893ccfSSadaf Ebrahimi void RecordTouchAllocation(uint32_t frameIndex,
6494*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6495*b7893ccfSSadaf Ebrahimi void RecordGetAllocationInfo(uint32_t frameIndex,
6496*b7893ccfSSadaf Ebrahimi VmaAllocation allocation);
6497*b7893ccfSSadaf Ebrahimi void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6498*b7893ccfSSadaf Ebrahimi VmaPool pool);
6499*b7893ccfSSadaf Ebrahimi void RecordDefragmentationBegin(uint32_t frameIndex,
6500*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2& info,
6501*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext ctx);
6502*b7893ccfSSadaf Ebrahimi void RecordDefragmentationEnd(uint32_t frameIndex,
6503*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext ctx);
6504*b7893ccfSSadaf Ebrahimi
6505*b7893ccfSSadaf Ebrahimi private:
6506*b7893ccfSSadaf Ebrahimi struct CallParams
6507*b7893ccfSSadaf Ebrahimi {
6508*b7893ccfSSadaf Ebrahimi uint32_t threadId;
6509*b7893ccfSSadaf Ebrahimi double time;
6510*b7893ccfSSadaf Ebrahimi };
6511*b7893ccfSSadaf Ebrahimi
6512*b7893ccfSSadaf Ebrahimi class UserDataString
6513*b7893ccfSSadaf Ebrahimi {
6514*b7893ccfSSadaf Ebrahimi public:
6515*b7893ccfSSadaf Ebrahimi UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6516*b7893ccfSSadaf Ebrahimi const char* GetString() const { return m_Str; }
6517*b7893ccfSSadaf Ebrahimi
6518*b7893ccfSSadaf Ebrahimi private:
6519*b7893ccfSSadaf Ebrahimi char m_PtrStr[17];
6520*b7893ccfSSadaf Ebrahimi const char* m_Str;
6521*b7893ccfSSadaf Ebrahimi };
6522*b7893ccfSSadaf Ebrahimi
6523*b7893ccfSSadaf Ebrahimi bool m_UseMutex;
6524*b7893ccfSSadaf Ebrahimi VmaRecordFlags m_Flags;
6525*b7893ccfSSadaf Ebrahimi FILE* m_File;
6526*b7893ccfSSadaf Ebrahimi VMA_MUTEX m_FileMutex;
6527*b7893ccfSSadaf Ebrahimi int64_t m_Freq;
6528*b7893ccfSSadaf Ebrahimi int64_t m_StartCounter;
6529*b7893ccfSSadaf Ebrahimi
6530*b7893ccfSSadaf Ebrahimi void GetBasicParams(CallParams& outParams);
6531*b7893ccfSSadaf Ebrahimi
6532*b7893ccfSSadaf Ebrahimi // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6533*b7893ccfSSadaf Ebrahimi template<typename T>
6534*b7893ccfSSadaf Ebrahimi void PrintPointerList(uint64_t count, const T* pItems)
6535*b7893ccfSSadaf Ebrahimi {
6536*b7893ccfSSadaf Ebrahimi if(count)
6537*b7893ccfSSadaf Ebrahimi {
6538*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%p", pItems[0]);
6539*b7893ccfSSadaf Ebrahimi for(uint64_t i = 1; i < count; ++i)
6540*b7893ccfSSadaf Ebrahimi {
6541*b7893ccfSSadaf Ebrahimi fprintf(m_File, " %p", pItems[i]);
6542*b7893ccfSSadaf Ebrahimi }
6543*b7893ccfSSadaf Ebrahimi }
6544*b7893ccfSSadaf Ebrahimi }
6545*b7893ccfSSadaf Ebrahimi
6546*b7893ccfSSadaf Ebrahimi void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6547*b7893ccfSSadaf Ebrahimi void Flush();
6548*b7893ccfSSadaf Ebrahimi };
6549*b7893ccfSSadaf Ebrahimi
6550*b7893ccfSSadaf Ebrahimi #endif // #if VMA_RECORDING_ENABLED
6551*b7893ccfSSadaf Ebrahimi
6552*b7893ccfSSadaf Ebrahimi // Main allocator object.
6553*b7893ccfSSadaf Ebrahimi struct VmaAllocator_T
6554*b7893ccfSSadaf Ebrahimi {
6555*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaAllocator_T)
6556*b7893ccfSSadaf Ebrahimi public:
6557*b7893ccfSSadaf Ebrahimi bool m_UseMutex;
6558*b7893ccfSSadaf Ebrahimi bool m_UseKhrDedicatedAllocation;
6559*b7893ccfSSadaf Ebrahimi VkDevice m_hDevice;
6560*b7893ccfSSadaf Ebrahimi bool m_AllocationCallbacksSpecified;
6561*b7893ccfSSadaf Ebrahimi VkAllocationCallbacks m_AllocationCallbacks;
6562*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6563*b7893ccfSSadaf Ebrahimi
6564*b7893ccfSSadaf Ebrahimi // Number of bytes still available out of the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6565*b7893ccfSSadaf Ebrahimi VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6566*b7893ccfSSadaf Ebrahimi VMA_MUTEX m_HeapSizeLimitMutex;
6567*b7893ccfSSadaf Ebrahimi
6568*b7893ccfSSadaf Ebrahimi VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6569*b7893ccfSSadaf Ebrahimi VkPhysicalDeviceMemoryProperties m_MemProps;
6570*b7893ccfSSadaf Ebrahimi
6571*b7893ccfSSadaf Ebrahimi // Default pools.
6572*b7893ccfSSadaf Ebrahimi VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6573*b7893ccfSSadaf Ebrahimi
6574*b7893ccfSSadaf Ebrahimi // Each vector is sorted by memory (handle value).
6575*b7893ccfSSadaf Ebrahimi typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6576*b7893ccfSSadaf Ebrahimi AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6577*b7893ccfSSadaf Ebrahimi VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6578*b7893ccfSSadaf Ebrahimi
6579*b7893ccfSSadaf Ebrahimi VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6580*b7893ccfSSadaf Ebrahimi VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6581*b7893ccfSSadaf Ebrahimi ~VmaAllocator_T();
6582*b7893ccfSSadaf Ebrahimi
6583*b7893ccfSSadaf Ebrahimi const VkAllocationCallbacks* GetAllocationCallbacks() const
6584*b7893ccfSSadaf Ebrahimi {
6585*b7893ccfSSadaf Ebrahimi return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6586*b7893ccfSSadaf Ebrahimi }
6587*b7893ccfSSadaf Ebrahimi const VmaVulkanFunctions& GetVulkanFunctions() const
6588*b7893ccfSSadaf Ebrahimi {
6589*b7893ccfSSadaf Ebrahimi return m_VulkanFunctions;
6590*b7893ccfSSadaf Ebrahimi }
6591*b7893ccfSSadaf Ebrahimi
6592*b7893ccfSSadaf Ebrahimi VkDeviceSize GetBufferImageGranularity() const
6593*b7893ccfSSadaf Ebrahimi {
6594*b7893ccfSSadaf Ebrahimi return VMA_MAX(
6595*b7893ccfSSadaf Ebrahimi static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6596*b7893ccfSSadaf Ebrahimi m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6597*b7893ccfSSadaf Ebrahimi }
6598*b7893ccfSSadaf Ebrahimi
6599*b7893ccfSSadaf Ebrahimi uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6600*b7893ccfSSadaf Ebrahimi uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6601*b7893ccfSSadaf Ebrahimi
6602*b7893ccfSSadaf Ebrahimi uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6603*b7893ccfSSadaf Ebrahimi {
6604*b7893ccfSSadaf Ebrahimi VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6605*b7893ccfSSadaf Ebrahimi return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6606*b7893ccfSSadaf Ebrahimi }
6607*b7893ccfSSadaf Ebrahimi // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
6608*b7893ccfSSadaf Ebrahimi bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6609*b7893ccfSSadaf Ebrahimi {
6610*b7893ccfSSadaf Ebrahimi return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6611*b7893ccfSSadaf Ebrahimi VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6612*b7893ccfSSadaf Ebrahimi }
6613*b7893ccfSSadaf Ebrahimi // Minimum alignment for all allocations in the given memory type.
6614*b7893ccfSSadaf Ebrahimi VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6615*b7893ccfSSadaf Ebrahimi {
6616*b7893ccfSSadaf Ebrahimi return IsMemoryTypeNonCoherent(memTypeIndex) ?
6617*b7893ccfSSadaf Ebrahimi VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6618*b7893ccfSSadaf Ebrahimi (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6619*b7893ccfSSadaf Ebrahimi }
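// Worked example with an assumed nonCoherentAtomSize of 64: for a HOST_VISIBLE but
// non-HOST_COHERENT type, GetMemoryTypeMinAlignment() returns at least 64, so a mapped
// suballocation's flush/invalidate range can be rounded to atom boundaries without
// touching a neighboring allocation. allocOffset and allocSize are placeholders.
// \code
// VkDeviceSize atom        = 64;                           // assumed limits.nonCoherentAtomSize
// VkDeviceSize flushOffset = (allocOffset / atom) * atom;  // round the offset down to an atom
// VkDeviceSize flushSize   = VmaAlignUp(allocSize, atom);  // round the size up to an atom
// \endcode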
6620*b7893ccfSSadaf Ebrahimi
6621*b7893ccfSSadaf Ebrahimi bool IsIntegratedGpu() const
6622*b7893ccfSSadaf Ebrahimi {
6623*b7893ccfSSadaf Ebrahimi return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6624*b7893ccfSSadaf Ebrahimi }
6625*b7893ccfSSadaf Ebrahimi
6626*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
6627*b7893ccfSSadaf Ebrahimi VmaRecorder* GetRecorder() const { return m_pRecorder; }
6628*b7893ccfSSadaf Ebrahimi #endif
6629*b7893ccfSSadaf Ebrahimi
6630*b7893ccfSSadaf Ebrahimi void GetBufferMemoryRequirements(
6631*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer,
6632*b7893ccfSSadaf Ebrahimi VkMemoryRequirements& memReq,
6633*b7893ccfSSadaf Ebrahimi bool& requiresDedicatedAllocation,
6634*b7893ccfSSadaf Ebrahimi bool& prefersDedicatedAllocation) const;
6635*b7893ccfSSadaf Ebrahimi void GetImageMemoryRequirements(
6636*b7893ccfSSadaf Ebrahimi VkImage hImage,
6637*b7893ccfSSadaf Ebrahimi VkMemoryRequirements& memReq,
6638*b7893ccfSSadaf Ebrahimi bool& requiresDedicatedAllocation,
6639*b7893ccfSSadaf Ebrahimi bool& prefersDedicatedAllocation) const;
6640*b7893ccfSSadaf Ebrahimi
6641*b7893ccfSSadaf Ebrahimi // Main allocation function.
6642*b7893ccfSSadaf Ebrahimi VkResult AllocateMemory(
6643*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
6644*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
6645*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
6646*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
6647*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
6648*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6649*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
6650*b7893ccfSSadaf Ebrahimi size_t allocationCount,
6651*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations);
6652*b7893ccfSSadaf Ebrahimi
6653*b7893ccfSSadaf Ebrahimi // Main deallocation function.
6654*b7893ccfSSadaf Ebrahimi void FreeMemory(
6655*b7893ccfSSadaf Ebrahimi size_t allocationCount,
6656*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations);
6657*b7893ccfSSadaf Ebrahimi
6658*b7893ccfSSadaf Ebrahimi VkResult ResizeAllocation(
6659*b7893ccfSSadaf Ebrahimi const VmaAllocation alloc,
6660*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize);
6661*b7893ccfSSadaf Ebrahimi
6662*b7893ccfSSadaf Ebrahimi void CalculateStats(VmaStats* pStats);
6663*b7893ccfSSadaf Ebrahimi
6664*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
6665*b7893ccfSSadaf Ebrahimi void PrintDetailedMap(class VmaJsonWriter& json);
6666*b7893ccfSSadaf Ebrahimi #endif
6667*b7893ccfSSadaf Ebrahimi
6668*b7893ccfSSadaf Ebrahimi VkResult DefragmentationBegin(
6669*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2& info,
6670*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
6671*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext* pContext);
6672*b7893ccfSSadaf Ebrahimi VkResult DefragmentationEnd(
6673*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext context);
6674*b7893ccfSSadaf Ebrahimi
6675*b7893ccfSSadaf Ebrahimi void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6676*b7893ccfSSadaf Ebrahimi bool TouchAllocation(VmaAllocation hAllocation);
6677*b7893ccfSSadaf Ebrahimi
6678*b7893ccfSSadaf Ebrahimi VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6679*b7893ccfSSadaf Ebrahimi void DestroyPool(VmaPool pool);
6680*b7893ccfSSadaf Ebrahimi void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6681*b7893ccfSSadaf Ebrahimi
6682*b7893ccfSSadaf Ebrahimi void SetCurrentFrameIndex(uint32_t frameIndex);
6683*b7893ccfSSadaf Ebrahimi uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6684*b7893ccfSSadaf Ebrahimi
6685*b7893ccfSSadaf Ebrahimi void MakePoolAllocationsLost(
6686*b7893ccfSSadaf Ebrahimi VmaPool hPool,
6687*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount);
6688*b7893ccfSSadaf Ebrahimi VkResult CheckPoolCorruption(VmaPool hPool);
6689*b7893ccfSSadaf Ebrahimi VkResult CheckCorruption(uint32_t memoryTypeBits);
6690*b7893ccfSSadaf Ebrahimi
6691*b7893ccfSSadaf Ebrahimi void CreateLostAllocation(VmaAllocation* pAllocation);
6692*b7893ccfSSadaf Ebrahimi
6693*b7893ccfSSadaf Ebrahimi VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6694*b7893ccfSSadaf Ebrahimi void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6695*b7893ccfSSadaf Ebrahimi
6696*b7893ccfSSadaf Ebrahimi VkResult Map(VmaAllocation hAllocation, void** ppData);
6697*b7893ccfSSadaf Ebrahimi void Unmap(VmaAllocation hAllocation);
6698*b7893ccfSSadaf Ebrahimi
6699*b7893ccfSSadaf Ebrahimi VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6700*b7893ccfSSadaf Ebrahimi VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6701*b7893ccfSSadaf Ebrahimi
6702*b7893ccfSSadaf Ebrahimi void FlushOrInvalidateAllocation(
6703*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation,
6704*b7893ccfSSadaf Ebrahimi VkDeviceSize offset, VkDeviceSize size,
6705*b7893ccfSSadaf Ebrahimi VMA_CACHE_OPERATION op);
6706*b7893ccfSSadaf Ebrahimi
6707*b7893ccfSSadaf Ebrahimi void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6708*b7893ccfSSadaf Ebrahimi
6709*b7893ccfSSadaf Ebrahimi private:
6710*b7893ccfSSadaf Ebrahimi VkDeviceSize m_PreferredLargeHeapBlockSize;
6711*b7893ccfSSadaf Ebrahimi
6712*b7893ccfSSadaf Ebrahimi VkPhysicalDevice m_PhysicalDevice;
6713*b7893ccfSSadaf Ebrahimi VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6714*b7893ccfSSadaf Ebrahimi
6715*b7893ccfSSadaf Ebrahimi VMA_RW_MUTEX m_PoolsMutex;
6716*b7893ccfSSadaf Ebrahimi // Protected by m_PoolsMutex. Sorted by pointer value.
6717*b7893ccfSSadaf Ebrahimi VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6718*b7893ccfSSadaf Ebrahimi uint32_t m_NextPoolId;
6719*b7893ccfSSadaf Ebrahimi
6720*b7893ccfSSadaf Ebrahimi VmaVulkanFunctions m_VulkanFunctions;
6721*b7893ccfSSadaf Ebrahimi
6722*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
6723*b7893ccfSSadaf Ebrahimi VmaRecorder* m_pRecorder;
6724*b7893ccfSSadaf Ebrahimi #endif
6725*b7893ccfSSadaf Ebrahimi
6726*b7893ccfSSadaf Ebrahimi void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6727*b7893ccfSSadaf Ebrahimi
6728*b7893ccfSSadaf Ebrahimi VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6729*b7893ccfSSadaf Ebrahimi
6730*b7893ccfSSadaf Ebrahimi VkResult AllocateMemoryOfType(
6731*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
6732*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
6733*b7893ccfSSadaf Ebrahimi bool dedicatedAllocation,
6734*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
6735*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
6736*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
6737*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
6738*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
6739*b7893ccfSSadaf Ebrahimi size_t allocationCount,
6740*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations);
6741*b7893ccfSSadaf Ebrahimi
6742*b7893ccfSSadaf Ebrahimi // Helper function only to be used inside AllocateDedicatedMemory.
6743*b7893ccfSSadaf Ebrahimi VkResult AllocateDedicatedMemoryPage(
6744*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
6745*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
6746*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
6747*b7893ccfSSadaf Ebrahimi const VkMemoryAllocateInfo& allocInfo,
6748*b7893ccfSSadaf Ebrahimi bool map,
6749*b7893ccfSSadaf Ebrahimi bool isUserDataString,
6750*b7893ccfSSadaf Ebrahimi void* pUserData,
6751*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation);
6752*b7893ccfSSadaf Ebrahimi
6753*b7893ccfSSadaf Ebrahimi // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6754*b7893ccfSSadaf Ebrahimi VkResult AllocateDedicatedMemory(
6755*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
6756*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
6757*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
6758*b7893ccfSSadaf Ebrahimi bool map,
6759*b7893ccfSSadaf Ebrahimi bool isUserDataString,
6760*b7893ccfSSadaf Ebrahimi void* pUserData,
6761*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
6762*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
6763*b7893ccfSSadaf Ebrahimi size_t allocationCount,
6764*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations);
6765*b7893ccfSSadaf Ebrahimi
6766*b7893ccfSSadaf Ebrahimi // Frees device memory of the given dedicated allocation and unregisters it from m_pDedicatedAllocations.
6767*b7893ccfSSadaf Ebrahimi void FreeDedicatedMemory(VmaAllocation allocation);
6768*b7893ccfSSadaf Ebrahimi };
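// Illustrative sketch of creating and destroying this object through the public API
// (physicalDevice and device are assumed to be valid Vulkan handles):
// \code
// VmaAllocatorCreateInfo allocatorInfo = {};
// allocatorInfo.physicalDevice = physicalDevice;
// allocatorInfo.device = device;
//
// VmaAllocator allocator = VK_NULL_HANDLE;
// VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// // ... vmaCreateBuffer / vmaCreateImage / vmaAllocateMemory ...
// vmaDestroyAllocator(allocator);
// \endcode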
6769*b7893ccfSSadaf Ebrahimi
6770*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
6771*b7893ccfSSadaf Ebrahimi // Memory allocation #2 after VmaAllocator_T definition
6772*b7893ccfSSadaf Ebrahimi
6773*b7893ccfSSadaf Ebrahimi static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6774*b7893ccfSSadaf Ebrahimi {
6775*b7893ccfSSadaf Ebrahimi return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6776*b7893ccfSSadaf Ebrahimi }
6777*b7893ccfSSadaf Ebrahimi
6778*b7893ccfSSadaf Ebrahimi static void VmaFree(VmaAllocator hAllocator, void* ptr)
6779*b7893ccfSSadaf Ebrahimi {
6780*b7893ccfSSadaf Ebrahimi VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6781*b7893ccfSSadaf Ebrahimi }
6782*b7893ccfSSadaf Ebrahimi
6783*b7893ccfSSadaf Ebrahimi template<typename T>
6784*b7893ccfSSadaf Ebrahimi static T* VmaAllocate(VmaAllocator hAllocator)
6785*b7893ccfSSadaf Ebrahimi {
6786*b7893ccfSSadaf Ebrahimi return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6787*b7893ccfSSadaf Ebrahimi }
6788*b7893ccfSSadaf Ebrahimi
6789*b7893ccfSSadaf Ebrahimi template<typename T>
6790*b7893ccfSSadaf Ebrahimi static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6791*b7893ccfSSadaf Ebrahimi {
6792*b7893ccfSSadaf Ebrahimi return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6793*b7893ccfSSadaf Ebrahimi }
6794*b7893ccfSSadaf Ebrahimi
6795*b7893ccfSSadaf Ebrahimi template<typename T>
6796*b7893ccfSSadaf Ebrahimi static void vma_delete(VmaAllocator hAllocator, T* ptr)
6797*b7893ccfSSadaf Ebrahimi {
6798*b7893ccfSSadaf Ebrahimi if(ptr != VMA_NULL)
6799*b7893ccfSSadaf Ebrahimi {
6800*b7893ccfSSadaf Ebrahimi ptr->~T();
6801*b7893ccfSSadaf Ebrahimi VmaFree(hAllocator, ptr);
6802*b7893ccfSSadaf Ebrahimi }
6803*b7893ccfSSadaf Ebrahimi }
6804*b7893ccfSSadaf Ebrahimi
6805*b7893ccfSSadaf Ebrahimi template<typename T>
6806*b7893ccfSSadaf Ebrahimi static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6807*b7893ccfSSadaf Ebrahimi {
6808*b7893ccfSSadaf Ebrahimi if(ptr != VMA_NULL)
6809*b7893ccfSSadaf Ebrahimi {
6810*b7893ccfSSadaf Ebrahimi for(size_t i = count; i--; )
6811*b7893ccfSSadaf Ebrahimi ptr[i].~T();
6812*b7893ccfSSadaf Ebrahimi VmaFree(hAllocator, ptr);
6813*b7893ccfSSadaf Ebrahimi }
6814*b7893ccfSSadaf Ebrahimi }
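// Illustrative sketch of the intended pattern (MyItem is a hypothetical POD type and
// <new> is assumed to be available for placement new):
// \code
// struct MyItem { uint32_t id; };
// MyItem* item = VmaAllocate<MyItem>(hAllocator);   // raw, uninitialized storage
// new(item) MyItem();                               // construct in place
// // ...
// vma_delete(hAllocator, item);                     // runs ~MyItem() and calls VmaFree()
// \endcode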
6815*b7893ccfSSadaf Ebrahimi
6816*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
6817*b7893ccfSSadaf Ebrahimi // VmaStringBuilder
6818*b7893ccfSSadaf Ebrahimi
6819*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
6820*b7893ccfSSadaf Ebrahimi
6821*b7893ccfSSadaf Ebrahimi class VmaStringBuilder
6822*b7893ccfSSadaf Ebrahimi {
6823*b7893ccfSSadaf Ebrahimi public:
6824*b7893ccfSSadaf Ebrahimi VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6825*b7893ccfSSadaf Ebrahimi size_t GetLength() const { return m_Data.size(); }
6826*b7893ccfSSadaf Ebrahimi const char* GetData() const { return m_Data.data(); }
6827*b7893ccfSSadaf Ebrahimi
6828*b7893ccfSSadaf Ebrahimi void Add(char ch) { m_Data.push_back(ch); }
6829*b7893ccfSSadaf Ebrahimi void Add(const char* pStr);
6830*b7893ccfSSadaf Ebrahimi void AddNewLine() { Add('\n'); }
6831*b7893ccfSSadaf Ebrahimi void AddNumber(uint32_t num);
6832*b7893ccfSSadaf Ebrahimi void AddNumber(uint64_t num);
6833*b7893ccfSSadaf Ebrahimi void AddPointer(const void* ptr);
6834*b7893ccfSSadaf Ebrahimi
6835*b7893ccfSSadaf Ebrahimi private:
6836*b7893ccfSSadaf Ebrahimi VmaVector< char, VmaStlAllocator<char> > m_Data;
6837*b7893ccfSSadaf Ebrahimi };
6838*b7893ccfSSadaf Ebrahimi
6839*b7893ccfSSadaf Ebrahimi void VmaStringBuilder::Add(const char* pStr)
6840*b7893ccfSSadaf Ebrahimi {
6841*b7893ccfSSadaf Ebrahimi const size_t strLen = strlen(pStr);
6842*b7893ccfSSadaf Ebrahimi if(strLen > 0)
6843*b7893ccfSSadaf Ebrahimi {
6844*b7893ccfSSadaf Ebrahimi const size_t oldCount = m_Data.size();
6845*b7893ccfSSadaf Ebrahimi m_Data.resize(oldCount + strLen);
6846*b7893ccfSSadaf Ebrahimi memcpy(m_Data.data() + oldCount, pStr, strLen);
6847*b7893ccfSSadaf Ebrahimi }
6848*b7893ccfSSadaf Ebrahimi }
6849*b7893ccfSSadaf Ebrahimi
6850*b7893ccfSSadaf Ebrahimi void VmaStringBuilder::AddNumber(uint32_t num)
6851*b7893ccfSSadaf Ebrahimi {
6852*b7893ccfSSadaf Ebrahimi char buf[11];
6853*b7893ccfSSadaf Ebrahimi VmaUint32ToStr(buf, sizeof(buf), num);
6854*b7893ccfSSadaf Ebrahimi Add(buf);
6855*b7893ccfSSadaf Ebrahimi }
6856*b7893ccfSSadaf Ebrahimi
6857*b7893ccfSSadaf Ebrahimi void VmaStringBuilder::AddNumber(uint64_t num)
6858*b7893ccfSSadaf Ebrahimi {
6859*b7893ccfSSadaf Ebrahimi char buf[21];
6860*b7893ccfSSadaf Ebrahimi VmaUint64ToStr(buf, sizeof(buf), num);
6861*b7893ccfSSadaf Ebrahimi Add(buf);
6862*b7893ccfSSadaf Ebrahimi }
6863*b7893ccfSSadaf Ebrahimi
6864*b7893ccfSSadaf Ebrahimi void VmaStringBuilder::AddPointer(const void* ptr)
6865*b7893ccfSSadaf Ebrahimi {
6866*b7893ccfSSadaf Ebrahimi char buf[21];
6867*b7893ccfSSadaf Ebrahimi VmaPtrToStr(buf, sizeof(buf), ptr);
6868*b7893ccfSSadaf Ebrahimi Add(buf);
6869*b7893ccfSSadaf Ebrahimi }
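// Illustrative usage, assuming hAllocator is a valid VmaAllocator. Note that GetData()
// is not null-terminated, so it must always be paired with GetLength().
// \code
// VmaStringBuilder sb(hAllocator);
// sb.Add("memory heap count: ");
// sb.AddNumber(2u);              // uint32_t overload
// sb.AddNewLine();
// fwrite(sb.GetData(), 1, sb.GetLength(), stdout);
// \endcode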
6870*b7893ccfSSadaf Ebrahimi
6871*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
6872*b7893ccfSSadaf Ebrahimi
6873*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
6874*b7893ccfSSadaf Ebrahimi // VmaJsonWriter
6875*b7893ccfSSadaf Ebrahimi
6876*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
6877*b7893ccfSSadaf Ebrahimi
6878*b7893ccfSSadaf Ebrahimi class VmaJsonWriter
6879*b7893ccfSSadaf Ebrahimi {
6880*b7893ccfSSadaf Ebrahimi VMA_CLASS_NO_COPY(VmaJsonWriter)
6881*b7893ccfSSadaf Ebrahimi public:
6882*b7893ccfSSadaf Ebrahimi VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6883*b7893ccfSSadaf Ebrahimi ~VmaJsonWriter();
6884*b7893ccfSSadaf Ebrahimi
6885*b7893ccfSSadaf Ebrahimi void BeginObject(bool singleLine = false);
6886*b7893ccfSSadaf Ebrahimi void EndObject();
6887*b7893ccfSSadaf Ebrahimi
6888*b7893ccfSSadaf Ebrahimi void BeginArray(bool singleLine = false);
6889*b7893ccfSSadaf Ebrahimi void EndArray();
6890*b7893ccfSSadaf Ebrahimi
6891*b7893ccfSSadaf Ebrahimi void WriteString(const char* pStr);
6892*b7893ccfSSadaf Ebrahimi void BeginString(const char* pStr = VMA_NULL);
6893*b7893ccfSSadaf Ebrahimi void ContinueString(const char* pStr);
6894*b7893ccfSSadaf Ebrahimi void ContinueString(uint32_t n);
6895*b7893ccfSSadaf Ebrahimi void ContinueString(uint64_t n);
6896*b7893ccfSSadaf Ebrahimi void ContinueString_Pointer(const void* ptr);
6897*b7893ccfSSadaf Ebrahimi void EndString(const char* pStr = VMA_NULL);
6898*b7893ccfSSadaf Ebrahimi
6899*b7893ccfSSadaf Ebrahimi void WriteNumber(uint32_t n);
6900*b7893ccfSSadaf Ebrahimi void WriteNumber(uint64_t n);
6901*b7893ccfSSadaf Ebrahimi void WriteBool(bool b);
6902*b7893ccfSSadaf Ebrahimi void WriteNull();
6903*b7893ccfSSadaf Ebrahimi
6904*b7893ccfSSadaf Ebrahimi private:
6905*b7893ccfSSadaf Ebrahimi static const char* const INDENT;
6906*b7893ccfSSadaf Ebrahimi
6907*b7893ccfSSadaf Ebrahimi enum COLLECTION_TYPE
6908*b7893ccfSSadaf Ebrahimi {
6909*b7893ccfSSadaf Ebrahimi COLLECTION_TYPE_OBJECT,
6910*b7893ccfSSadaf Ebrahimi COLLECTION_TYPE_ARRAY,
6911*b7893ccfSSadaf Ebrahimi };
6912*b7893ccfSSadaf Ebrahimi struct StackItem
6913*b7893ccfSSadaf Ebrahimi {
6914*b7893ccfSSadaf Ebrahimi COLLECTION_TYPE type;
6915*b7893ccfSSadaf Ebrahimi uint32_t valueCount;
6916*b7893ccfSSadaf Ebrahimi bool singleLineMode;
6917*b7893ccfSSadaf Ebrahimi };
6918*b7893ccfSSadaf Ebrahimi
6919*b7893ccfSSadaf Ebrahimi VmaStringBuilder& m_SB;
6920*b7893ccfSSadaf Ebrahimi VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6921*b7893ccfSSadaf Ebrahimi bool m_InsideString;
6922*b7893ccfSSadaf Ebrahimi
6923*b7893ccfSSadaf Ebrahimi void BeginValue(bool isString);
6924*b7893ccfSSadaf Ebrahimi void WriteIndent(bool oneLess = false);
6925*b7893ccfSSadaf Ebrahimi };
6926*b7893ccfSSadaf Ebrahimi
6927*b7893ccfSSadaf Ebrahimi const char* const VmaJsonWriter::INDENT = " ";
6928*b7893ccfSSadaf Ebrahimi
6929*b7893ccfSSadaf Ebrahimi VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6930*b7893ccfSSadaf Ebrahimi m_SB(sb),
6931*b7893ccfSSadaf Ebrahimi m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6932*b7893ccfSSadaf Ebrahimi m_InsideString(false)
6933*b7893ccfSSadaf Ebrahimi {
6934*b7893ccfSSadaf Ebrahimi }
6935*b7893ccfSSadaf Ebrahimi
6936*b7893ccfSSadaf Ebrahimi VmaJsonWriter::~VmaJsonWriter()
6937*b7893ccfSSadaf Ebrahimi {
6938*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
6939*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Stack.empty());
6940*b7893ccfSSadaf Ebrahimi }
6941*b7893ccfSSadaf Ebrahimi
6942*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::BeginObject(bool singleLine)
6943*b7893ccfSSadaf Ebrahimi {
6944*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
6945*b7893ccfSSadaf Ebrahimi
6946*b7893ccfSSadaf Ebrahimi BeginValue(false);
6947*b7893ccfSSadaf Ebrahimi m_SB.Add('{');
6948*b7893ccfSSadaf Ebrahimi
6949*b7893ccfSSadaf Ebrahimi StackItem item;
6950*b7893ccfSSadaf Ebrahimi item.type = COLLECTION_TYPE_OBJECT;
6951*b7893ccfSSadaf Ebrahimi item.valueCount = 0;
6952*b7893ccfSSadaf Ebrahimi item.singleLineMode = singleLine;
6953*b7893ccfSSadaf Ebrahimi m_Stack.push_back(item);
6954*b7893ccfSSadaf Ebrahimi }
6955*b7893ccfSSadaf Ebrahimi
6956*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::EndObject()
6957*b7893ccfSSadaf Ebrahimi {
6958*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
6959*b7893ccfSSadaf Ebrahimi
6960*b7893ccfSSadaf Ebrahimi WriteIndent(true);
6961*b7893ccfSSadaf Ebrahimi m_SB.Add('}');
6962*b7893ccfSSadaf Ebrahimi
6963*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6964*b7893ccfSSadaf Ebrahimi m_Stack.pop_back();
6965*b7893ccfSSadaf Ebrahimi }
6966*b7893ccfSSadaf Ebrahimi
6967*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::BeginArray(bool singleLine)
6968*b7893ccfSSadaf Ebrahimi {
6969*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
6970*b7893ccfSSadaf Ebrahimi
6971*b7893ccfSSadaf Ebrahimi BeginValue(false);
6972*b7893ccfSSadaf Ebrahimi m_SB.Add('[');
6973*b7893ccfSSadaf Ebrahimi
6974*b7893ccfSSadaf Ebrahimi StackItem item;
6975*b7893ccfSSadaf Ebrahimi item.type = COLLECTION_TYPE_ARRAY;
6976*b7893ccfSSadaf Ebrahimi item.valueCount = 0;
6977*b7893ccfSSadaf Ebrahimi item.singleLineMode = singleLine;
6978*b7893ccfSSadaf Ebrahimi m_Stack.push_back(item);
6979*b7893ccfSSadaf Ebrahimi }
6980*b7893ccfSSadaf Ebrahimi
6981*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::EndArray()
6982*b7893ccfSSadaf Ebrahimi {
6983*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
6984*b7893ccfSSadaf Ebrahimi
6985*b7893ccfSSadaf Ebrahimi WriteIndent(true);
6986*b7893ccfSSadaf Ebrahimi m_SB.Add(']');
6987*b7893ccfSSadaf Ebrahimi
6988*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6989*b7893ccfSSadaf Ebrahimi m_Stack.pop_back();
6990*b7893ccfSSadaf Ebrahimi }
6991*b7893ccfSSadaf Ebrahimi
6992*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteString(const char* pStr)
6993*b7893ccfSSadaf Ebrahimi {
6994*b7893ccfSSadaf Ebrahimi BeginString(pStr);
6995*b7893ccfSSadaf Ebrahimi EndString();
6996*b7893ccfSSadaf Ebrahimi }
6997*b7893ccfSSadaf Ebrahimi
6998*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::BeginString(const char* pStr)
6999*b7893ccfSSadaf Ebrahimi {
7000*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
7001*b7893ccfSSadaf Ebrahimi
7002*b7893ccfSSadaf Ebrahimi BeginValue(true);
7003*b7893ccfSSadaf Ebrahimi m_SB.Add('"');
7004*b7893ccfSSadaf Ebrahimi m_InsideString = true;
7005*b7893ccfSSadaf Ebrahimi if(pStr != VMA_NULL && pStr[0] != '\0')
7006*b7893ccfSSadaf Ebrahimi {
7007*b7893ccfSSadaf Ebrahimi ContinueString(pStr);
7008*b7893ccfSSadaf Ebrahimi }
7009*b7893ccfSSadaf Ebrahimi }
7010*b7893ccfSSadaf Ebrahimi
7011*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::ContinueString(const char* pStr)
7012*b7893ccfSSadaf Ebrahimi {
7013*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_InsideString);
7014*b7893ccfSSadaf Ebrahimi
7015*b7893ccfSSadaf Ebrahimi const size_t strLen = strlen(pStr);
7016*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < strLen; ++i)
7017*b7893ccfSSadaf Ebrahimi {
7018*b7893ccfSSadaf Ebrahimi char ch = pStr[i];
7019*b7893ccfSSadaf Ebrahimi if(ch == '\\')
7020*b7893ccfSSadaf Ebrahimi {
7021*b7893ccfSSadaf Ebrahimi m_SB.Add("\\\\");
7022*b7893ccfSSadaf Ebrahimi }
7023*b7893ccfSSadaf Ebrahimi else if(ch == '"')
7024*b7893ccfSSadaf Ebrahimi {
7025*b7893ccfSSadaf Ebrahimi m_SB.Add("\\\"");
7026*b7893ccfSSadaf Ebrahimi }
7027*b7893ccfSSadaf Ebrahimi else if(ch >= 32)
7028*b7893ccfSSadaf Ebrahimi {
7029*b7893ccfSSadaf Ebrahimi m_SB.Add(ch);
7030*b7893ccfSSadaf Ebrahimi }
7031*b7893ccfSSadaf Ebrahimi else switch(ch)
7032*b7893ccfSSadaf Ebrahimi {
7033*b7893ccfSSadaf Ebrahimi case '\b':
7034*b7893ccfSSadaf Ebrahimi m_SB.Add("\\b");
7035*b7893ccfSSadaf Ebrahimi break;
7036*b7893ccfSSadaf Ebrahimi case '\f':
7037*b7893ccfSSadaf Ebrahimi m_SB.Add("\\f");
7038*b7893ccfSSadaf Ebrahimi break;
7039*b7893ccfSSadaf Ebrahimi case '\n':
7040*b7893ccfSSadaf Ebrahimi m_SB.Add("\\n");
7041*b7893ccfSSadaf Ebrahimi break;
7042*b7893ccfSSadaf Ebrahimi case '\r':
7043*b7893ccfSSadaf Ebrahimi m_SB.Add("\\r");
7044*b7893ccfSSadaf Ebrahimi break;
7045*b7893ccfSSadaf Ebrahimi case '\t':
7046*b7893ccfSSadaf Ebrahimi m_SB.Add("\\t");
7047*b7893ccfSSadaf Ebrahimi break;
7048*b7893ccfSSadaf Ebrahimi default:
7049*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Character not currently supported.");
7050*b7893ccfSSadaf Ebrahimi break;
7051*b7893ccfSSadaf Ebrahimi }
7052*b7893ccfSSadaf Ebrahimi }
7053*b7893ccfSSadaf Ebrahimi }
7054*b7893ccfSSadaf Ebrahimi
7055*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::ContinueString(uint32_t n)
7056*b7893ccfSSadaf Ebrahimi {
7057*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_InsideString);
7058*b7893ccfSSadaf Ebrahimi m_SB.AddNumber(n);
7059*b7893ccfSSadaf Ebrahimi }
7060*b7893ccfSSadaf Ebrahimi
7061*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::ContinueString(uint64_t n)
7062*b7893ccfSSadaf Ebrahimi {
7063*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_InsideString);
7064*b7893ccfSSadaf Ebrahimi m_SB.AddNumber(n);
7065*b7893ccfSSadaf Ebrahimi }
7066*b7893ccfSSadaf Ebrahimi
7067*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7068*b7893ccfSSadaf Ebrahimi {
7069*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_InsideString);
7070*b7893ccfSSadaf Ebrahimi m_SB.AddPointer(ptr);
7071*b7893ccfSSadaf Ebrahimi }
7072*b7893ccfSSadaf Ebrahimi
7073*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::EndString(const char* pStr)
7074*b7893ccfSSadaf Ebrahimi {
7075*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_InsideString);
7076*b7893ccfSSadaf Ebrahimi if(pStr != VMA_NULL && pStr[0] != '\0')
7077*b7893ccfSSadaf Ebrahimi {
7078*b7893ccfSSadaf Ebrahimi ContinueString(pStr);
7079*b7893ccfSSadaf Ebrahimi }
7080*b7893ccfSSadaf Ebrahimi m_SB.Add('"');
7081*b7893ccfSSadaf Ebrahimi m_InsideString = false;
7082*b7893ccfSSadaf Ebrahimi }
7083*b7893ccfSSadaf Ebrahimi
7084*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteNumber(uint32_t n)
7085*b7893ccfSSadaf Ebrahimi {
7086*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
7087*b7893ccfSSadaf Ebrahimi BeginValue(false);
7088*b7893ccfSSadaf Ebrahimi m_SB.AddNumber(n);
7089*b7893ccfSSadaf Ebrahimi }
7090*b7893ccfSSadaf Ebrahimi
7091*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteNumber(uint64_t n)
7092*b7893ccfSSadaf Ebrahimi {
7093*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
7094*b7893ccfSSadaf Ebrahimi BeginValue(false);
7095*b7893ccfSSadaf Ebrahimi m_SB.AddNumber(n);
7096*b7893ccfSSadaf Ebrahimi }
7097*b7893ccfSSadaf Ebrahimi
7098*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteBool(bool b)
7099*b7893ccfSSadaf Ebrahimi {
7100*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
7101*b7893ccfSSadaf Ebrahimi BeginValue(false);
7102*b7893ccfSSadaf Ebrahimi m_SB.Add(b ? "true" : "false");
7103*b7893ccfSSadaf Ebrahimi }
7104*b7893ccfSSadaf Ebrahimi
7105*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteNull()
7106*b7893ccfSSadaf Ebrahimi {
7107*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!m_InsideString);
7108*b7893ccfSSadaf Ebrahimi BeginValue(false);
7109*b7893ccfSSadaf Ebrahimi m_SB.Add("null");
7110*b7893ccfSSadaf Ebrahimi }
7111*b7893ccfSSadaf Ebrahimi
7112*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::BeginValue(bool isString)
7113*b7893ccfSSadaf Ebrahimi {
7114*b7893ccfSSadaf Ebrahimi if(!m_Stack.empty())
7115*b7893ccfSSadaf Ebrahimi {
7116*b7893ccfSSadaf Ebrahimi StackItem& currItem = m_Stack.back();
7117*b7893ccfSSadaf Ebrahimi if(currItem.type == COLLECTION_TYPE_OBJECT &&
7118*b7893ccfSSadaf Ebrahimi currItem.valueCount % 2 == 0)
7119*b7893ccfSSadaf Ebrahimi {
7120*b7893ccfSSadaf Ebrahimi VMA_ASSERT(isString);
7121*b7893ccfSSadaf Ebrahimi }
7122*b7893ccfSSadaf Ebrahimi
7123*b7893ccfSSadaf Ebrahimi if(currItem.type == COLLECTION_TYPE_OBJECT &&
7124*b7893ccfSSadaf Ebrahimi currItem.valueCount % 2 != 0)
7125*b7893ccfSSadaf Ebrahimi {
7126*b7893ccfSSadaf Ebrahimi m_SB.Add(": ");
7127*b7893ccfSSadaf Ebrahimi }
7128*b7893ccfSSadaf Ebrahimi else if(currItem.valueCount > 0)
7129*b7893ccfSSadaf Ebrahimi {
7130*b7893ccfSSadaf Ebrahimi m_SB.Add(", ");
7131*b7893ccfSSadaf Ebrahimi WriteIndent();
7132*b7893ccfSSadaf Ebrahimi }
7133*b7893ccfSSadaf Ebrahimi else
7134*b7893ccfSSadaf Ebrahimi {
7135*b7893ccfSSadaf Ebrahimi WriteIndent();
7136*b7893ccfSSadaf Ebrahimi }
7137*b7893ccfSSadaf Ebrahimi ++currItem.valueCount;
7138*b7893ccfSSadaf Ebrahimi }
7139*b7893ccfSSadaf Ebrahimi }
7140*b7893ccfSSadaf Ebrahimi
7141*b7893ccfSSadaf Ebrahimi void VmaJsonWriter::WriteIndent(bool oneLess)
7142*b7893ccfSSadaf Ebrahimi {
7143*b7893ccfSSadaf Ebrahimi if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7144*b7893ccfSSadaf Ebrahimi {
7145*b7893ccfSSadaf Ebrahimi m_SB.AddNewLine();
7146*b7893ccfSSadaf Ebrahimi
7147*b7893ccfSSadaf Ebrahimi size_t count = m_Stack.size();
7148*b7893ccfSSadaf Ebrahimi if(count > 0 && oneLess)
7149*b7893ccfSSadaf Ebrahimi {
7150*b7893ccfSSadaf Ebrahimi --count;
7151*b7893ccfSSadaf Ebrahimi }
7152*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < count; ++i)
7153*b7893ccfSSadaf Ebrahimi {
7154*b7893ccfSSadaf Ebrahimi m_SB.Add(INDENT);
7155*b7893ccfSSadaf Ebrahimi }
7156*b7893ccfSSadaf Ebrahimi }
7157*b7893ccfSSadaf Ebrahimi }
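// Illustrative usage, assuming hAllocator is a valid VmaAllocator. Inside an object,
// keys and values alternate, which is how BeginValue() decides between ": " and ", ".
// \code
// VmaStringBuilder sb(hAllocator);
// {
//     VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//     json.BeginObject();
//     json.WriteString("TotalBytes");   // key
//     json.WriteNumber(1048576u);       // value
//     json.WriteString("Dedicated");    // key
//     json.WriteBool(false);            // value
//     json.EndObject();
// } // ~VmaJsonWriter() asserts that every object/array was closed.
// // sb now holds { "TotalBytes": 1048576, "Dedicated": false } (with newlines/indentation).
// \endcode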
7158*b7893ccfSSadaf Ebrahimi
7159*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
7160*b7893ccfSSadaf Ebrahimi
7161*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
7162*b7893ccfSSadaf Ebrahimi
7163*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7164*b7893ccfSSadaf Ebrahimi {
7165*b7893ccfSSadaf Ebrahimi if(IsUserDataString())
7166*b7893ccfSSadaf Ebrahimi {
7167*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7168*b7893ccfSSadaf Ebrahimi
7169*b7893ccfSSadaf Ebrahimi FreeUserDataString(hAllocator);
7170*b7893ccfSSadaf Ebrahimi
7171*b7893ccfSSadaf Ebrahimi if(pUserData != VMA_NULL)
7172*b7893ccfSSadaf Ebrahimi {
7173*b7893ccfSSadaf Ebrahimi const char* const newStrSrc = (char*)pUserData;
7174*b7893ccfSSadaf Ebrahimi const size_t newStrLen = strlen(newStrSrc);
7175*b7893ccfSSadaf Ebrahimi char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7176*b7893ccfSSadaf Ebrahimi memcpy(newStrDst, newStrSrc, newStrLen + 1);
7177*b7893ccfSSadaf Ebrahimi m_pUserData = newStrDst;
7178*b7893ccfSSadaf Ebrahimi }
7179*b7893ccfSSadaf Ebrahimi }
7180*b7893ccfSSadaf Ebrahimi else
7181*b7893ccfSSadaf Ebrahimi {
7182*b7893ccfSSadaf Ebrahimi m_pUserData = pUserData;
7183*b7893ccfSSadaf Ebrahimi }
7184*b7893ccfSSadaf Ebrahimi }
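// Illustrative sketch of the public entry point that ends up here. With
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT the string is copied and owned by the
// allocation; otherwise the raw pointer is stored as-is.
// \code
// vmaSetAllocationUserData(allocator, alloc, (void*)"Texture: hero_diffuse");
// VmaAllocationInfo allocInfo;
// vmaGetAllocationInfo(allocator, alloc, &allocInfo);
// const char* name = (const char*)allocInfo.pUserData;   // internal copy of the string
// \endcode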
7185*b7893ccfSSadaf Ebrahimi
7186*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::ChangeBlockAllocation(
7187*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
7188*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* block,
7189*b7893ccfSSadaf Ebrahimi VkDeviceSize offset)
7190*b7893ccfSSadaf Ebrahimi {
7191*b7893ccfSSadaf Ebrahimi VMA_ASSERT(block != VMA_NULL);
7192*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7193*b7893ccfSSadaf Ebrahimi
7194*b7893ccfSSadaf Ebrahimi // Move mapping reference counter from old block to new block.
7195*b7893ccfSSadaf Ebrahimi if(block != m_BlockAllocation.m_Block)
7196*b7893ccfSSadaf Ebrahimi {
7197*b7893ccfSSadaf Ebrahimi uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7198*b7893ccfSSadaf Ebrahimi if(IsPersistentMap())
7199*b7893ccfSSadaf Ebrahimi ++mapRefCount;
7200*b7893ccfSSadaf Ebrahimi m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7201*b7893ccfSSadaf Ebrahimi block->Map(hAllocator, mapRefCount, VMA_NULL);
7202*b7893ccfSSadaf Ebrahimi }
7203*b7893ccfSSadaf Ebrahimi
7204*b7893ccfSSadaf Ebrahimi m_BlockAllocation.m_Block = block;
7205*b7893ccfSSadaf Ebrahimi m_BlockAllocation.m_Offset = offset;
7206*b7893ccfSSadaf Ebrahimi }
7207*b7893ccfSSadaf Ebrahimi
7208*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7209*b7893ccfSSadaf Ebrahimi {
7210*b7893ccfSSadaf Ebrahimi VMA_ASSERT(newSize > 0);
7211*b7893ccfSSadaf Ebrahimi m_Size = newSize;
7212*b7893ccfSSadaf Ebrahimi }
7213*b7893ccfSSadaf Ebrahimi
7214*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7215*b7893ccfSSadaf Ebrahimi {
7216*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7217*b7893ccfSSadaf Ebrahimi m_BlockAllocation.m_Offset = newOffset;
7218*b7893ccfSSadaf Ebrahimi }
7219*b7893ccfSSadaf Ebrahimi
7220*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaAllocation_T::GetOffset() const
7221*b7893ccfSSadaf Ebrahimi {
7222*b7893ccfSSadaf Ebrahimi switch(m_Type)
7223*b7893ccfSSadaf Ebrahimi {
7224*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_BLOCK:
7225*b7893ccfSSadaf Ebrahimi return m_BlockAllocation.m_Offset;
7226*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_DEDICATED:
7227*b7893ccfSSadaf Ebrahimi return 0;
7228*b7893ccfSSadaf Ebrahimi default:
7229*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7230*b7893ccfSSadaf Ebrahimi return 0;
7231*b7893ccfSSadaf Ebrahimi }
7232*b7893ccfSSadaf Ebrahimi }
7233*b7893ccfSSadaf Ebrahimi
7234*b7893ccfSSadaf Ebrahimi VkDeviceMemory VmaAllocation_T::GetMemory() const
7235*b7893ccfSSadaf Ebrahimi {
7236*b7893ccfSSadaf Ebrahimi switch(m_Type)
7237*b7893ccfSSadaf Ebrahimi {
7238*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_BLOCK:
7239*b7893ccfSSadaf Ebrahimi return m_BlockAllocation.m_Block->GetDeviceMemory();
7240*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_DEDICATED:
7241*b7893ccfSSadaf Ebrahimi return m_DedicatedAllocation.m_hMemory;
7242*b7893ccfSSadaf Ebrahimi default:
7243*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7244*b7893ccfSSadaf Ebrahimi return VK_NULL_HANDLE;
7245*b7893ccfSSadaf Ebrahimi }
7246*b7893ccfSSadaf Ebrahimi }
7247*b7893ccfSSadaf Ebrahimi
7248*b7893ccfSSadaf Ebrahimi uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7249*b7893ccfSSadaf Ebrahimi {
7250*b7893ccfSSadaf Ebrahimi switch(m_Type)
7251*b7893ccfSSadaf Ebrahimi {
7252*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_BLOCK:
7253*b7893ccfSSadaf Ebrahimi return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7254*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_DEDICATED:
7255*b7893ccfSSadaf Ebrahimi return m_DedicatedAllocation.m_MemoryTypeIndex;
7256*b7893ccfSSadaf Ebrahimi default:
7257*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7258*b7893ccfSSadaf Ebrahimi return UINT32_MAX;
7259*b7893ccfSSadaf Ebrahimi }
7260*b7893ccfSSadaf Ebrahimi }
7261*b7893ccfSSadaf Ebrahimi
7262*b7893ccfSSadaf Ebrahimi void* VmaAllocation_T::GetMappedData() const
7263*b7893ccfSSadaf Ebrahimi {
7264*b7893ccfSSadaf Ebrahimi switch(m_Type)
7265*b7893ccfSSadaf Ebrahimi {
7266*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_BLOCK:
7267*b7893ccfSSadaf Ebrahimi if(m_MapCount != 0)
7268*b7893ccfSSadaf Ebrahimi {
7269*b7893ccfSSadaf Ebrahimi void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7270*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlockData != VMA_NULL);
7271*b7893ccfSSadaf Ebrahimi return (char*)pBlockData + m_BlockAllocation.m_Offset;
7272*b7893ccfSSadaf Ebrahimi }
7273*b7893ccfSSadaf Ebrahimi else
7274*b7893ccfSSadaf Ebrahimi {
7275*b7893ccfSSadaf Ebrahimi return VMA_NULL;
7276*b7893ccfSSadaf Ebrahimi }
7277*b7893ccfSSadaf Ebrahimi break;
7278*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_DEDICATED:
7279*b7893ccfSSadaf Ebrahimi VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7280*b7893ccfSSadaf Ebrahimi return m_DedicatedAllocation.m_pMappedData;
7281*b7893ccfSSadaf Ebrahimi default:
7282*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7283*b7893ccfSSadaf Ebrahimi return VMA_NULL;
7284*b7893ccfSSadaf Ebrahimi }
7285*b7893ccfSSadaf Ebrahimi }
7286*b7893ccfSSadaf Ebrahimi
7287*b7893ccfSSadaf Ebrahimi bool VmaAllocation_T::CanBecomeLost() const
7288*b7893ccfSSadaf Ebrahimi {
7289*b7893ccfSSadaf Ebrahimi switch(m_Type)
7290*b7893ccfSSadaf Ebrahimi {
7291*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_BLOCK:
7292*b7893ccfSSadaf Ebrahimi return m_BlockAllocation.m_CanBecomeLost;
7293*b7893ccfSSadaf Ebrahimi case ALLOCATION_TYPE_DEDICATED:
7294*b7893ccfSSadaf Ebrahimi return false;
7295*b7893ccfSSadaf Ebrahimi default:
7296*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7297*b7893ccfSSadaf Ebrahimi return false;
7298*b7893ccfSSadaf Ebrahimi }
7299*b7893ccfSSadaf Ebrahimi }
7300*b7893ccfSSadaf Ebrahimi
7301*b7893ccfSSadaf Ebrahimi VmaPool VmaAllocation_T::GetPool() const
7302*b7893ccfSSadaf Ebrahimi {
7303*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7304*b7893ccfSSadaf Ebrahimi return m_BlockAllocation.m_hPool;
7305*b7893ccfSSadaf Ebrahimi }
7306*b7893ccfSSadaf Ebrahimi
7307*b7893ccfSSadaf Ebrahimi bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7308*b7893ccfSSadaf Ebrahimi {
7309*b7893ccfSSadaf Ebrahimi VMA_ASSERT(CanBecomeLost());
7310*b7893ccfSSadaf Ebrahimi
7311*b7893ccfSSadaf Ebrahimi /*
7312*b7893ccfSSadaf Ebrahimi Warning: This is a carefully designed algorithm.
7313*b7893ccfSSadaf Ebrahimi Do not modify unless you really know what you're doing :)
7314*b7893ccfSSadaf Ebrahimi */
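// The loop below is a compare-and-swap retry loop over the atomic last-use frame index:
// - if the index is already VMA_FRAME_INDEX_LOST, the allocation is lost and this call is invalid;
// - if the allocation was used within the last frameInUseCount frames, it cannot be made lost;
// - otherwise the index is atomically exchanged to VMA_FRAME_INDEX_LOST and the function returns true.
//   If the exchange fails because another thread changed the index concurrently, the loop retries.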
7315*b7893ccfSSadaf Ebrahimi uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7316*b7893ccfSSadaf Ebrahimi for(;;)
7317*b7893ccfSSadaf Ebrahimi {
7318*b7893ccfSSadaf Ebrahimi if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7319*b7893ccfSSadaf Ebrahimi {
7320*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
7321*b7893ccfSSadaf Ebrahimi return false;
7322*b7893ccfSSadaf Ebrahimi }
7323*b7893ccfSSadaf Ebrahimi else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7324*b7893ccfSSadaf Ebrahimi {
7325*b7893ccfSSadaf Ebrahimi return false;
7326*b7893ccfSSadaf Ebrahimi }
7327*b7893ccfSSadaf Ebrahimi else // Last use time earlier than current time.
7328*b7893ccfSSadaf Ebrahimi {
7329*b7893ccfSSadaf Ebrahimi if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7330*b7893ccfSSadaf Ebrahimi {
7331*b7893ccfSSadaf Ebrahimi // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7332*b7893ccfSSadaf Ebrahimi // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7333*b7893ccfSSadaf Ebrahimi return true;
7334*b7893ccfSSadaf Ebrahimi }
7335*b7893ccfSSadaf Ebrahimi }
7336*b7893ccfSSadaf Ebrahimi }
7337*b7893ccfSSadaf Ebrahimi }
7338*b7893ccfSSadaf Ebrahimi
7339*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
7340*b7893ccfSSadaf Ebrahimi
7341*b7893ccfSSadaf Ebrahimi // Correspond to values of enum VmaSuballocationType.
7342*b7893ccfSSadaf Ebrahimi static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7343*b7893ccfSSadaf Ebrahimi "FREE",
7344*b7893ccfSSadaf Ebrahimi "UNKNOWN",
7345*b7893ccfSSadaf Ebrahimi "BUFFER",
7346*b7893ccfSSadaf Ebrahimi "IMAGE_UNKNOWN",
7347*b7893ccfSSadaf Ebrahimi "IMAGE_LINEAR",
7348*b7893ccfSSadaf Ebrahimi "IMAGE_OPTIMAL",
7349*b7893ccfSSadaf Ebrahimi };
7350*b7893ccfSSadaf Ebrahimi
7351*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7352*b7893ccfSSadaf Ebrahimi {
7353*b7893ccfSSadaf Ebrahimi json.WriteString("Type");
7354*b7893ccfSSadaf Ebrahimi json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7355*b7893ccfSSadaf Ebrahimi
7356*b7893ccfSSadaf Ebrahimi json.WriteString("Size");
7357*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_Size);
7358*b7893ccfSSadaf Ebrahimi
7359*b7893ccfSSadaf Ebrahimi if(m_pUserData != VMA_NULL)
7360*b7893ccfSSadaf Ebrahimi {
7361*b7893ccfSSadaf Ebrahimi json.WriteString("UserData");
7362*b7893ccfSSadaf Ebrahimi if(IsUserDataString())
7363*b7893ccfSSadaf Ebrahimi {
7364*b7893ccfSSadaf Ebrahimi json.WriteString((const char*)m_pUserData);
7365*b7893ccfSSadaf Ebrahimi }
7366*b7893ccfSSadaf Ebrahimi else
7367*b7893ccfSSadaf Ebrahimi {
7368*b7893ccfSSadaf Ebrahimi json.BeginString();
7369*b7893ccfSSadaf Ebrahimi json.ContinueString_Pointer(m_pUserData);
7370*b7893ccfSSadaf Ebrahimi json.EndString();
7371*b7893ccfSSadaf Ebrahimi }
7372*b7893ccfSSadaf Ebrahimi }
7373*b7893ccfSSadaf Ebrahimi
7374*b7893ccfSSadaf Ebrahimi json.WriteString("CreationFrameIndex");
7375*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_CreationFrameIndex);
7376*b7893ccfSSadaf Ebrahimi
7377*b7893ccfSSadaf Ebrahimi json.WriteString("LastUseFrameIndex");
7378*b7893ccfSSadaf Ebrahimi json.WriteNumber(GetLastUseFrameIndex());
7379*b7893ccfSSadaf Ebrahimi
7380*b7893ccfSSadaf Ebrahimi if(m_BufferImageUsage != 0)
7381*b7893ccfSSadaf Ebrahimi {
7382*b7893ccfSSadaf Ebrahimi json.WriteString("Usage");
7383*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_BufferImageUsage);
7384*b7893ccfSSadaf Ebrahimi }
7385*b7893ccfSSadaf Ebrahimi }
7386*b7893ccfSSadaf Ebrahimi
7387*b7893ccfSSadaf Ebrahimi #endif
7388*b7893ccfSSadaf Ebrahimi
7389*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7390*b7893ccfSSadaf Ebrahimi {
7391*b7893ccfSSadaf Ebrahimi VMA_ASSERT(IsUserDataString());
7392*b7893ccfSSadaf Ebrahimi if(m_pUserData != VMA_NULL)
7393*b7893ccfSSadaf Ebrahimi {
7394*b7893ccfSSadaf Ebrahimi char* const oldStr = (char*)m_pUserData;
7395*b7893ccfSSadaf Ebrahimi const size_t oldStrLen = strlen(oldStr);
7396*b7893ccfSSadaf Ebrahimi vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7397*b7893ccfSSadaf Ebrahimi m_pUserData = VMA_NULL;
7398*b7893ccfSSadaf Ebrahimi }
7399*b7893ccfSSadaf Ebrahimi }
7400*b7893ccfSSadaf Ebrahimi
7401*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::BlockAllocMap()
7402*b7893ccfSSadaf Ebrahimi {
7403*b7893ccfSSadaf Ebrahimi VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7404*b7893ccfSSadaf Ebrahimi
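// m_MapCount packs two things: the low bits count nested Map() calls (capped at 0x7F here), while
// the MAP_COUNT_FLAG_PERSISTENT_MAP bit is masked out of the check so a persistently mapped
// allocation does not consume one of the explicit map slots.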
7405*b7893ccfSSadaf Ebrahimi if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7406*b7893ccfSSadaf Ebrahimi {
7407*b7893ccfSSadaf Ebrahimi ++m_MapCount;
7408*b7893ccfSSadaf Ebrahimi }
7409*b7893ccfSSadaf Ebrahimi else
7410*b7893ccfSSadaf Ebrahimi {
7411*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7412*b7893ccfSSadaf Ebrahimi }
7413*b7893ccfSSadaf Ebrahimi }
7414*b7893ccfSSadaf Ebrahimi
7415*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::BlockAllocUnmap()
7416*b7893ccfSSadaf Ebrahimi {
7417*b7893ccfSSadaf Ebrahimi VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7418*b7893ccfSSadaf Ebrahimi
7419*b7893ccfSSadaf Ebrahimi if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7420*b7893ccfSSadaf Ebrahimi {
7421*b7893ccfSSadaf Ebrahimi --m_MapCount;
7422*b7893ccfSSadaf Ebrahimi }
7423*b7893ccfSSadaf Ebrahimi else
7424*b7893ccfSSadaf Ebrahimi {
7425*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7426*b7893ccfSSadaf Ebrahimi }
7427*b7893ccfSSadaf Ebrahimi }
7428*b7893ccfSSadaf Ebrahimi
7429*b7893ccfSSadaf Ebrahimi VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7430*b7893ccfSSadaf Ebrahimi {
7431*b7893ccfSSadaf Ebrahimi VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7432*b7893ccfSSadaf Ebrahimi
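// Dedicated allocations map lazily: the first Map() call performs vkMapMemory on the whole memory
// object and caches the returned pointer; subsequent calls only increment the map count (again
// capped at 0x7F, with the persistent-map flag bit excluded) and return the cached pointer.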
7433*b7893ccfSSadaf Ebrahimi if(m_MapCount != 0)
7434*b7893ccfSSadaf Ebrahimi {
7435*b7893ccfSSadaf Ebrahimi if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7436*b7893ccfSSadaf Ebrahimi {
7437*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7438*b7893ccfSSadaf Ebrahimi *ppData = m_DedicatedAllocation.m_pMappedData;
7439*b7893ccfSSadaf Ebrahimi ++m_MapCount;
7440*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
7441*b7893ccfSSadaf Ebrahimi }
7442*b7893ccfSSadaf Ebrahimi else
7443*b7893ccfSSadaf Ebrahimi {
7444*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7445*b7893ccfSSadaf Ebrahimi return VK_ERROR_MEMORY_MAP_FAILED;
7446*b7893ccfSSadaf Ebrahimi }
7447*b7893ccfSSadaf Ebrahimi }
7448*b7893ccfSSadaf Ebrahimi else
7449*b7893ccfSSadaf Ebrahimi {
7450*b7893ccfSSadaf Ebrahimi VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7451*b7893ccfSSadaf Ebrahimi hAllocator->m_hDevice,
7452*b7893ccfSSadaf Ebrahimi m_DedicatedAllocation.m_hMemory,
7453*b7893ccfSSadaf Ebrahimi 0, // offset
7454*b7893ccfSSadaf Ebrahimi VK_WHOLE_SIZE,
7455*b7893ccfSSadaf Ebrahimi 0, // flags
7456*b7893ccfSSadaf Ebrahimi ppData);
7457*b7893ccfSSadaf Ebrahimi if(result == VK_SUCCESS)
7458*b7893ccfSSadaf Ebrahimi {
7459*b7893ccfSSadaf Ebrahimi m_DedicatedAllocation.m_pMappedData = *ppData;
7460*b7893ccfSSadaf Ebrahimi m_MapCount = 1;
7461*b7893ccfSSadaf Ebrahimi }
7462*b7893ccfSSadaf Ebrahimi return result;
7463*b7893ccfSSadaf Ebrahimi }
7464*b7893ccfSSadaf Ebrahimi }
7465*b7893ccfSSadaf Ebrahimi
7466*b7893ccfSSadaf Ebrahimi void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7467*b7893ccfSSadaf Ebrahimi {
7468*b7893ccfSSadaf Ebrahimi VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7469*b7893ccfSSadaf Ebrahimi
7470*b7893ccfSSadaf Ebrahimi if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7471*b7893ccfSSadaf Ebrahimi {
7472*b7893ccfSSadaf Ebrahimi --m_MapCount;
7473*b7893ccfSSadaf Ebrahimi if(m_MapCount == 0)
7474*b7893ccfSSadaf Ebrahimi {
7475*b7893ccfSSadaf Ebrahimi m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7476*b7893ccfSSadaf Ebrahimi (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7477*b7893ccfSSadaf Ebrahimi hAllocator->m_hDevice,
7478*b7893ccfSSadaf Ebrahimi m_DedicatedAllocation.m_hMemory);
7479*b7893ccfSSadaf Ebrahimi }
7480*b7893ccfSSadaf Ebrahimi }
7481*b7893ccfSSadaf Ebrahimi else
7482*b7893ccfSSadaf Ebrahimi {
7483*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7484*b7893ccfSSadaf Ebrahimi }
7485*b7893ccfSSadaf Ebrahimi }
7486*b7893ccfSSadaf Ebrahimi
7487*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
7488*b7893ccfSSadaf Ebrahimi
7489*b7893ccfSSadaf Ebrahimi static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7490*b7893ccfSSadaf Ebrahimi {
7491*b7893ccfSSadaf Ebrahimi json.BeginObject();
7492*b7893ccfSSadaf Ebrahimi
7493*b7893ccfSSadaf Ebrahimi json.WriteString("Blocks");
7494*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.blockCount);
7495*b7893ccfSSadaf Ebrahimi
7496*b7893ccfSSadaf Ebrahimi json.WriteString("Allocations");
7497*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.allocationCount);
7498*b7893ccfSSadaf Ebrahimi
7499*b7893ccfSSadaf Ebrahimi json.WriteString("UnusedRanges");
7500*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.unusedRangeCount);
7501*b7893ccfSSadaf Ebrahimi
7502*b7893ccfSSadaf Ebrahimi json.WriteString("UsedBytes");
7503*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.usedBytes);
7504*b7893ccfSSadaf Ebrahimi
7505*b7893ccfSSadaf Ebrahimi json.WriteString("UnusedBytes");
7506*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.unusedBytes);
7507*b7893ccfSSadaf Ebrahimi
7508*b7893ccfSSadaf Ebrahimi if(stat.allocationCount > 1)
7509*b7893ccfSSadaf Ebrahimi {
7510*b7893ccfSSadaf Ebrahimi json.WriteString("AllocationSize");
7511*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
7512*b7893ccfSSadaf Ebrahimi json.WriteString("Min");
7513*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.allocationSizeMin);
7514*b7893ccfSSadaf Ebrahimi json.WriteString("Avg");
7515*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.allocationSizeAvg);
7516*b7893ccfSSadaf Ebrahimi json.WriteString("Max");
7517*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.allocationSizeMax);
7518*b7893ccfSSadaf Ebrahimi json.EndObject();
7519*b7893ccfSSadaf Ebrahimi }
7520*b7893ccfSSadaf Ebrahimi
7521*b7893ccfSSadaf Ebrahimi if(stat.unusedRangeCount > 1)
7522*b7893ccfSSadaf Ebrahimi {
7523*b7893ccfSSadaf Ebrahimi json.WriteString("UnusedRangeSize");
7524*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
7525*b7893ccfSSadaf Ebrahimi json.WriteString("Min");
7526*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.unusedRangeSizeMin);
7527*b7893ccfSSadaf Ebrahimi json.WriteString("Avg");
7528*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.unusedRangeSizeAvg);
7529*b7893ccfSSadaf Ebrahimi json.WriteString("Max");
7530*b7893ccfSSadaf Ebrahimi json.WriteNumber(stat.unusedRangeSizeMax);
7531*b7893ccfSSadaf Ebrahimi json.EndObject();
7532*b7893ccfSSadaf Ebrahimi }
7533*b7893ccfSSadaf Ebrahimi
7534*b7893ccfSSadaf Ebrahimi json.EndObject();
7535*b7893ccfSSadaf Ebrahimi }
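// For illustration only, the object emitted by VmaPrintStatInfo has roughly this shape (the
// numeric values below are made up):
//
//   { "Blocks": 2, "Allocations": 57, "UnusedRanges": 4,
//     "UsedBytes": 10485760, "UnusedBytes": 524288,
//     "AllocationSize": { "Min": 256, "Avg": 183960, "Max": 1048576 },
//     "UnusedRangeSize": { "Min": 4096, "Avg": 131072, "Max": 262144 } }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects are only written when there is more than
// one allocation / unused range, as guarded above.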
7536*b7893ccfSSadaf Ebrahimi
7537*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
7538*b7893ccfSSadaf Ebrahimi
7539*b7893ccfSSadaf Ebrahimi struct VmaSuballocationItemSizeLess
7540*b7893ccfSSadaf Ebrahimi {
7541*b7893ccfSSadaf Ebrahimi bool operator()(
7542*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator lhs,
7543*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator rhs) const
7544*b7893ccfSSadaf Ebrahimi {
7545*b7893ccfSSadaf Ebrahimi return lhs->size < rhs->size;
7546*b7893ccfSSadaf Ebrahimi }
7547*b7893ccfSSadaf Ebrahimi bool operator()(
7548*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator lhs,
7549*b7893ccfSSadaf Ebrahimi VkDeviceSize rhsSize) const
7550*b7893ccfSSadaf Ebrahimi {
7551*b7893ccfSSadaf Ebrahimi return lhs->size < rhsSize;
7552*b7893ccfSSadaf Ebrahimi }
7553*b7893ccfSSadaf Ebrahimi };
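// This comparator is heterogeneous on purpose: the first overload orders two free-suballocation
// iterators by size, the second compares an iterator against a raw VkDeviceSize key. That lets
// m_FreeSuballocationsBySize (kept sorted by ascending size) be binary-searched directly with a
// requested size, as done in VmaBlockMetadata_Generic::CreateAllocationRequest below.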
7554*b7893ccfSSadaf Ebrahimi
7555*b7893ccfSSadaf Ebrahimi
7556*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
7557*b7893ccfSSadaf Ebrahimi // class VmaBlockMetadata
7558*b7893ccfSSadaf Ebrahimi
7559*b7893ccfSSadaf Ebrahimi VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7560*b7893ccfSSadaf Ebrahimi m_Size(0),
7561*b7893ccfSSadaf Ebrahimi m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7562*b7893ccfSSadaf Ebrahimi {
7563*b7893ccfSSadaf Ebrahimi }
7564*b7893ccfSSadaf Ebrahimi
7565*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
7566*b7893ccfSSadaf Ebrahimi
7567*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7568*b7893ccfSSadaf Ebrahimi VkDeviceSize unusedBytes,
7569*b7893ccfSSadaf Ebrahimi size_t allocationCount,
7570*b7893ccfSSadaf Ebrahimi size_t unusedRangeCount) const
7571*b7893ccfSSadaf Ebrahimi {
7572*b7893ccfSSadaf Ebrahimi json.BeginObject();
7573*b7893ccfSSadaf Ebrahimi
7574*b7893ccfSSadaf Ebrahimi json.WriteString("TotalBytes");
7575*b7893ccfSSadaf Ebrahimi json.WriteNumber(GetSize());
7576*b7893ccfSSadaf Ebrahimi
7577*b7893ccfSSadaf Ebrahimi json.WriteString("UnusedBytes");
7578*b7893ccfSSadaf Ebrahimi json.WriteNumber(unusedBytes);
7579*b7893ccfSSadaf Ebrahimi
7580*b7893ccfSSadaf Ebrahimi json.WriteString("Allocations");
7581*b7893ccfSSadaf Ebrahimi json.WriteNumber((uint64_t)allocationCount);
7582*b7893ccfSSadaf Ebrahimi
7583*b7893ccfSSadaf Ebrahimi json.WriteString("UnusedRanges");
7584*b7893ccfSSadaf Ebrahimi json.WriteNumber((uint64_t)unusedRangeCount);
7585*b7893ccfSSadaf Ebrahimi
7586*b7893ccfSSadaf Ebrahimi json.WriteString("Suballocations");
7587*b7893ccfSSadaf Ebrahimi json.BeginArray();
7588*b7893ccfSSadaf Ebrahimi }
7589*b7893ccfSSadaf Ebrahimi
7590*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7591*b7893ccfSSadaf Ebrahimi VkDeviceSize offset,
7592*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation) const
7593*b7893ccfSSadaf Ebrahimi {
7594*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
7595*b7893ccfSSadaf Ebrahimi
7596*b7893ccfSSadaf Ebrahimi json.WriteString("Offset");
7597*b7893ccfSSadaf Ebrahimi json.WriteNumber(offset);
7598*b7893ccfSSadaf Ebrahimi
7599*b7893ccfSSadaf Ebrahimi hAllocation->PrintParameters(json);
7600*b7893ccfSSadaf Ebrahimi
7601*b7893ccfSSadaf Ebrahimi json.EndObject();
7602*b7893ccfSSadaf Ebrahimi }
7603*b7893ccfSSadaf Ebrahimi
7604*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7605*b7893ccfSSadaf Ebrahimi VkDeviceSize offset,
7606*b7893ccfSSadaf Ebrahimi VkDeviceSize size) const
7607*b7893ccfSSadaf Ebrahimi {
7608*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
7609*b7893ccfSSadaf Ebrahimi
7610*b7893ccfSSadaf Ebrahimi json.WriteString("Offset");
7611*b7893ccfSSadaf Ebrahimi json.WriteNumber(offset);
7612*b7893ccfSSadaf Ebrahimi
7613*b7893ccfSSadaf Ebrahimi json.WriteString("Type");
7614*b7893ccfSSadaf Ebrahimi json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7615*b7893ccfSSadaf Ebrahimi
7616*b7893ccfSSadaf Ebrahimi json.WriteString("Size");
7617*b7893ccfSSadaf Ebrahimi json.WriteNumber(size);
7618*b7893ccfSSadaf Ebrahimi
7619*b7893ccfSSadaf Ebrahimi json.EndObject();
7620*b7893ccfSSadaf Ebrahimi }
7621*b7893ccfSSadaf Ebrahimi
7622*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7623*b7893ccfSSadaf Ebrahimi {
7624*b7893ccfSSadaf Ebrahimi json.EndArray();
7625*b7893ccfSSadaf Ebrahimi json.EndObject();
7626*b7893ccfSSadaf Ebrahimi }
7627*b7893ccfSSadaf Ebrahimi
7628*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
7629*b7893ccfSSadaf Ebrahimi
7630*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
7631*b7893ccfSSadaf Ebrahimi // class VmaBlockMetadata_Generic
7632*b7893ccfSSadaf Ebrahimi
7633*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7634*b7893ccfSSadaf Ebrahimi VmaBlockMetadata(hAllocator),
7635*b7893ccfSSadaf Ebrahimi m_FreeCount(0),
7636*b7893ccfSSadaf Ebrahimi m_SumFreeSize(0),
7637*b7893ccfSSadaf Ebrahimi m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7638*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7639*b7893ccfSSadaf Ebrahimi {
7640*b7893ccfSSadaf Ebrahimi }
7641*b7893ccfSSadaf Ebrahimi
7642*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7643*b7893ccfSSadaf Ebrahimi {
7644*b7893ccfSSadaf Ebrahimi }
7645*b7893ccfSSadaf Ebrahimi
7646*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7647*b7893ccfSSadaf Ebrahimi {
7648*b7893ccfSSadaf Ebrahimi VmaBlockMetadata::Init(size);
7649*b7893ccfSSadaf Ebrahimi
7650*b7893ccfSSadaf Ebrahimi m_FreeCount = 1;
7651*b7893ccfSSadaf Ebrahimi m_SumFreeSize = size;
7652*b7893ccfSSadaf Ebrahimi
7653*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = {};
7654*b7893ccfSSadaf Ebrahimi suballoc.offset = 0;
7655*b7893ccfSSadaf Ebrahimi suballoc.size = size;
7656*b7893ccfSSadaf Ebrahimi suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7657*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = VK_NULL_HANDLE;
7658*b7893ccfSSadaf Ebrahimi
7659*b7893ccfSSadaf Ebrahimi VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7660*b7893ccfSSadaf Ebrahimi m_Suballocations.push_back(suballoc);
7661*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7662*b7893ccfSSadaf Ebrahimi --suballocItem;
7663*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.push_back(suballocItem);
7664*b7893ccfSSadaf Ebrahimi }
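// After Init() the block's metadata holds exactly one suballocation: a single free range covering
// the whole block, also registered in m_FreeSuballocationsBySize. This is the state that IsEmpty()
// below recognizes (one suballocation, one free range).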
7665*b7893ccfSSadaf Ebrahimi
7666*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::Validate() const
7667*b7893ccfSSadaf Ebrahimi {
7668*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(!m_Suballocations.empty());
7669*b7893ccfSSadaf Ebrahimi
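// Validate() re-derives the block's bookkeeping from scratch by walking m_Suballocations and checks
// it against the cached totals: offsets must be contiguous, no two free ranges may be adjacent
// (they should have been merged), every free range large enough must be registered in
// m_FreeSuballocationsBySize, and that vector must stay sorted by ascending size.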
7670*b7893ccfSSadaf Ebrahimi // Expected offset of new suballocation as calculated from previous ones.
7671*b7893ccfSSadaf Ebrahimi VkDeviceSize calculatedOffset = 0;
7672*b7893ccfSSadaf Ebrahimi // Expected number of free suballocations as calculated from traversing their list.
7673*b7893ccfSSadaf Ebrahimi uint32_t calculatedFreeCount = 0;
7674*b7893ccfSSadaf Ebrahimi // Expected sum size of free suballocations as calculated from traversing their list.
7675*b7893ccfSSadaf Ebrahimi VkDeviceSize calculatedSumFreeSize = 0;
7676*b7893ccfSSadaf Ebrahimi // Expected number of free suballocations that should be registered in
7677*b7893ccfSSadaf Ebrahimi // m_FreeSuballocationsBySize calculated from traversing their list.
7678*b7893ccfSSadaf Ebrahimi size_t freeSuballocationsToRegister = 0;
7679*b7893ccfSSadaf Ebrahimi // True if previous visited suballocation was free.
7680*b7893ccfSSadaf Ebrahimi bool prevFree = false;
7681*b7893ccfSSadaf Ebrahimi
7682*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7683*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.cend();
7684*b7893ccfSSadaf Ebrahimi ++suballocItem)
7685*b7893ccfSSadaf Ebrahimi {
7686*b7893ccfSSadaf Ebrahimi const VmaSuballocation& subAlloc = *suballocItem;
7687*b7893ccfSSadaf Ebrahimi
7688*b7893ccfSSadaf Ebrahimi // Actual offset of this suballocation doesn't match the expected one.
7689*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7690*b7893ccfSSadaf Ebrahimi
7691*b7893ccfSSadaf Ebrahimi const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7692*b7893ccfSSadaf Ebrahimi // Two adjacent free suballocations are invalid. They should be merged.
7693*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(!prevFree || !currFree);
7694*b7893ccfSSadaf Ebrahimi
7695*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7696*b7893ccfSSadaf Ebrahimi
7697*b7893ccfSSadaf Ebrahimi if(currFree)
7698*b7893ccfSSadaf Ebrahimi {
7699*b7893ccfSSadaf Ebrahimi calculatedSumFreeSize += subAlloc.size;
7700*b7893ccfSSadaf Ebrahimi ++calculatedFreeCount;
7701*b7893ccfSSadaf Ebrahimi if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7702*b7893ccfSSadaf Ebrahimi {
7703*b7893ccfSSadaf Ebrahimi ++freeSuballocationsToRegister;
7704*b7893ccfSSadaf Ebrahimi }
7705*b7893ccfSSadaf Ebrahimi
7706*b7893ccfSSadaf Ebrahimi // Margin required between allocations - every free space must be at least that large.
7707*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7708*b7893ccfSSadaf Ebrahimi }
7709*b7893ccfSSadaf Ebrahimi else
7710*b7893ccfSSadaf Ebrahimi {
7711*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7712*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7713*b7893ccfSSadaf Ebrahimi
7714*b7893ccfSSadaf Ebrahimi // Margin required between allocations - previous allocation must be free.
7715*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7716*b7893ccfSSadaf Ebrahimi }
7717*b7893ccfSSadaf Ebrahimi
7718*b7893ccfSSadaf Ebrahimi calculatedOffset += subAlloc.size;
7719*b7893ccfSSadaf Ebrahimi prevFree = currFree;
7720*b7893ccfSSadaf Ebrahimi }
7721*b7893ccfSSadaf Ebrahimi
7722*b7893ccfSSadaf Ebrahimi // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7723*b7893ccfSSadaf Ebrahimi // match the expected one.
7724*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7725*b7893ccfSSadaf Ebrahimi
7726*b7893ccfSSadaf Ebrahimi VkDeviceSize lastSize = 0;
7727*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7728*b7893ccfSSadaf Ebrahimi {
7729*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7730*b7893ccfSSadaf Ebrahimi
7731*b7893ccfSSadaf Ebrahimi // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7732*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7733*b7893ccfSSadaf Ebrahimi // They must be sorted by size ascending.
7734*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocItem->size >= lastSize);
7735*b7893ccfSSadaf Ebrahimi
7736*b7893ccfSSadaf Ebrahimi lastSize = suballocItem->size;
7737*b7893ccfSSadaf Ebrahimi }
7738*b7893ccfSSadaf Ebrahimi
7739*b7893ccfSSadaf Ebrahimi // Check if totals match calculated values.
7740*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(ValidateFreeSuballocationList());
7741*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(calculatedOffset == GetSize());
7742*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7743*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7744*b7893ccfSSadaf Ebrahimi
7745*b7893ccfSSadaf Ebrahimi return true;
7746*b7893ccfSSadaf Ebrahimi }
7747*b7893ccfSSadaf Ebrahimi
7748*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7749*b7893ccfSSadaf Ebrahimi {
7750*b7893ccfSSadaf Ebrahimi if(!m_FreeSuballocationsBySize.empty())
7751*b7893ccfSSadaf Ebrahimi {
7752*b7893ccfSSadaf Ebrahimi return m_FreeSuballocationsBySize.back()->size;
7753*b7893ccfSSadaf Ebrahimi }
7754*b7893ccfSSadaf Ebrahimi else
7755*b7893ccfSSadaf Ebrahimi {
7756*b7893ccfSSadaf Ebrahimi return 0;
7757*b7893ccfSSadaf Ebrahimi }
7758*b7893ccfSSadaf Ebrahimi }
7759*b7893ccfSSadaf Ebrahimi
7760*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::IsEmpty() const
7761*b7893ccfSSadaf Ebrahimi {
7762*b7893ccfSSadaf Ebrahimi return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7763*b7893ccfSSadaf Ebrahimi }
7764*b7893ccfSSadaf Ebrahimi
7765*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7766*b7893ccfSSadaf Ebrahimi {
7767*b7893ccfSSadaf Ebrahimi outInfo.blockCount = 1;
7768*b7893ccfSSadaf Ebrahimi
7769*b7893ccfSSadaf Ebrahimi const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7770*b7893ccfSSadaf Ebrahimi outInfo.allocationCount = rangeCount - m_FreeCount;
7771*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeCount = m_FreeCount;
7772*b7893ccfSSadaf Ebrahimi
7773*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes = m_SumFreeSize;
7774*b7893ccfSSadaf Ebrahimi outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7775*b7893ccfSSadaf Ebrahimi
7776*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = UINT64_MAX;
7777*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMax = 0;
7778*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = UINT64_MAX;
7779*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = 0;
7780*b7893ccfSSadaf Ebrahimi
7781*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7782*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.cend();
7783*b7893ccfSSadaf Ebrahimi ++suballocItem)
7784*b7893ccfSSadaf Ebrahimi {
7785*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = *suballocItem;
7786*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7787*b7893ccfSSadaf Ebrahimi {
7788*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7789*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7790*b7893ccfSSadaf Ebrahimi }
7791*b7893ccfSSadaf Ebrahimi else
7792*b7893ccfSSadaf Ebrahimi {
7793*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7794*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7795*b7893ccfSSadaf Ebrahimi }
7796*b7893ccfSSadaf Ebrahimi }
7797*b7893ccfSSadaf Ebrahimi }
7798*b7893ccfSSadaf Ebrahimi
7799*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7800*b7893ccfSSadaf Ebrahimi {
7801*b7893ccfSSadaf Ebrahimi const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7802*b7893ccfSSadaf Ebrahimi
7803*b7893ccfSSadaf Ebrahimi inoutStats.size += GetSize();
7804*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += m_SumFreeSize;
7805*b7893ccfSSadaf Ebrahimi inoutStats.allocationCount += rangeCount - m_FreeCount;
7806*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeCount += m_FreeCount;
7807*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7808*b7893ccfSSadaf Ebrahimi }
7809*b7893ccfSSadaf Ebrahimi
7810*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
7811*b7893ccfSSadaf Ebrahimi
7812*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7813*b7893ccfSSadaf Ebrahimi {
7814*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Begin(json,
7815*b7893ccfSSadaf Ebrahimi m_SumFreeSize, // unusedBytes
7816*b7893ccfSSadaf Ebrahimi m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7817*b7893ccfSSadaf Ebrahimi m_FreeCount); // unusedRangeCount
7818*b7893ccfSSadaf Ebrahimi
7819*b7893ccfSSadaf Ebrahimi size_t i = 0;
7820*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7821*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.cend();
7822*b7893ccfSSadaf Ebrahimi ++suballocItem, ++i)
7823*b7893ccfSSadaf Ebrahimi {
7824*b7893ccfSSadaf Ebrahimi if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7825*b7893ccfSSadaf Ebrahimi {
7826*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7827*b7893ccfSSadaf Ebrahimi }
7828*b7893ccfSSadaf Ebrahimi else
7829*b7893ccfSSadaf Ebrahimi {
7830*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7831*b7893ccfSSadaf Ebrahimi }
7832*b7893ccfSSadaf Ebrahimi }
7833*b7893ccfSSadaf Ebrahimi
7834*b7893ccfSSadaf Ebrahimi PrintDetailedMap_End(json);
7835*b7893ccfSSadaf Ebrahimi }
7836*b7893ccfSSadaf Ebrahimi
7837*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
7838*b7893ccfSSadaf Ebrahimi
7839*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7840*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
7841*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
7842*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
7843*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
7844*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
7845*b7893ccfSSadaf Ebrahimi bool upperAddress,
7846*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
7847*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
7848*b7893ccfSSadaf Ebrahimi uint32_t strategy,
7849*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
7850*b7893ccfSSadaf Ebrahimi {
7851*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocSize > 0);
7852*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!upperAddress);
7853*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7854*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest != VMA_NULL);
7855*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(Validate());
7856*b7893ccfSSadaf Ebrahimi
7857*b7893ccfSSadaf Ebrahimi // There is not enough total free space in this block to fulfill the request: Early return.
7858*b7893ccfSSadaf Ebrahimi if(canMakeOtherLost == false &&
7859*b7893ccfSSadaf Ebrahimi m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7860*b7893ccfSSadaf Ebrahimi {
7861*b7893ccfSSadaf Ebrahimi return false;
7862*b7893ccfSSadaf Ebrahimi }
7863*b7893ccfSSadaf Ebrahimi
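// Three in-block search strategies follow:
//  - BEST_FIT: binary-search m_FreeSuballocationsBySize for the smallest free range that can hold
//    allocSize plus debug margins, then try candidates in ascending size order;
//  - internal MIN_OFFSET: walk the suballocation list front to back and take the first free range
//    that fits, which yields the lowest possible offset;
//  - WORST_FIT / FIRST_FIT: try candidates from the largest registered free range downward.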
7864*b7893ccfSSadaf Ebrahimi // New algorithm, efficiently searching freeSuballocationsBySize.
7865*b7893ccfSSadaf Ebrahimi const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7866*b7893ccfSSadaf Ebrahimi if(freeSuballocCount > 0)
7867*b7893ccfSSadaf Ebrahimi {
7868*b7893ccfSSadaf Ebrahimi if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7869*b7893ccfSSadaf Ebrahimi {
7870*b7893ccfSSadaf Ebrahimi // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7871*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7872*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.data(),
7873*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.data() + freeSuballocCount,
7874*b7893ccfSSadaf Ebrahimi allocSize + 2 * VMA_DEBUG_MARGIN,
7875*b7893ccfSSadaf Ebrahimi VmaSuballocationItemSizeLess());
7876*b7893ccfSSadaf Ebrahimi size_t index = it - m_FreeSuballocationsBySize.data();
7877*b7893ccfSSadaf Ebrahimi for(; index < freeSuballocCount; ++index)
7878*b7893ccfSSadaf Ebrahimi {
7879*b7893ccfSSadaf Ebrahimi if(CheckAllocation(
7880*b7893ccfSSadaf Ebrahimi currentFrameIndex,
7881*b7893ccfSSadaf Ebrahimi frameInUseCount,
7882*b7893ccfSSadaf Ebrahimi bufferImageGranularity,
7883*b7893ccfSSadaf Ebrahimi allocSize,
7884*b7893ccfSSadaf Ebrahimi allocAlignment,
7885*b7893ccfSSadaf Ebrahimi allocType,
7886*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize[index],
7887*b7893ccfSSadaf Ebrahimi false, // canMakeOtherLost
7888*b7893ccfSSadaf Ebrahimi &pAllocationRequest->offset,
7889*b7893ccfSSadaf Ebrahimi &pAllocationRequest->itemsToMakeLostCount,
7890*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumFreeSize,
7891*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumItemSize))
7892*b7893ccfSSadaf Ebrahimi {
7893*b7893ccfSSadaf Ebrahimi pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7894*b7893ccfSSadaf Ebrahimi return true;
7895*b7893ccfSSadaf Ebrahimi }
7896*b7893ccfSSadaf Ebrahimi }
7897*b7893ccfSSadaf Ebrahimi }
7898*b7893ccfSSadaf Ebrahimi else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7899*b7893ccfSSadaf Ebrahimi {
7900*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7901*b7893ccfSSadaf Ebrahimi it != m_Suballocations.end();
7902*b7893ccfSSadaf Ebrahimi ++it)
7903*b7893ccfSSadaf Ebrahimi {
7904*b7893ccfSSadaf Ebrahimi if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7905*b7893ccfSSadaf Ebrahimi currentFrameIndex,
7906*b7893ccfSSadaf Ebrahimi frameInUseCount,
7907*b7893ccfSSadaf Ebrahimi bufferImageGranularity,
7908*b7893ccfSSadaf Ebrahimi allocSize,
7909*b7893ccfSSadaf Ebrahimi allocAlignment,
7910*b7893ccfSSadaf Ebrahimi allocType,
7911*b7893ccfSSadaf Ebrahimi it,
7912*b7893ccfSSadaf Ebrahimi false, // canMakeOtherLost
7913*b7893ccfSSadaf Ebrahimi &pAllocationRequest->offset,
7914*b7893ccfSSadaf Ebrahimi &pAllocationRequest->itemsToMakeLostCount,
7915*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumFreeSize,
7916*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumItemSize))
7917*b7893ccfSSadaf Ebrahimi {
7918*b7893ccfSSadaf Ebrahimi pAllocationRequest->item = it;
7919*b7893ccfSSadaf Ebrahimi return true;
7920*b7893ccfSSadaf Ebrahimi }
7921*b7893ccfSSadaf Ebrahimi }
7922*b7893ccfSSadaf Ebrahimi }
7923*b7893ccfSSadaf Ebrahimi else // WORST_FIT, FIRST_FIT
7924*b7893ccfSSadaf Ebrahimi {
7925*b7893ccfSSadaf Ebrahimi // Search starting from the biggest suballocations.
7926*b7893ccfSSadaf Ebrahimi for(size_t index = freeSuballocCount; index--; )
7927*b7893ccfSSadaf Ebrahimi {
7928*b7893ccfSSadaf Ebrahimi if(CheckAllocation(
7929*b7893ccfSSadaf Ebrahimi currentFrameIndex,
7930*b7893ccfSSadaf Ebrahimi frameInUseCount,
7931*b7893ccfSSadaf Ebrahimi bufferImageGranularity,
7932*b7893ccfSSadaf Ebrahimi allocSize,
7933*b7893ccfSSadaf Ebrahimi allocAlignment,
7934*b7893ccfSSadaf Ebrahimi allocType,
7935*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize[index],
7936*b7893ccfSSadaf Ebrahimi false, // canMakeOtherLost
7937*b7893ccfSSadaf Ebrahimi &pAllocationRequest->offset,
7938*b7893ccfSSadaf Ebrahimi &pAllocationRequest->itemsToMakeLostCount,
7939*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumFreeSize,
7940*b7893ccfSSadaf Ebrahimi &pAllocationRequest->sumItemSize))
7941*b7893ccfSSadaf Ebrahimi {
7942*b7893ccfSSadaf Ebrahimi pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7943*b7893ccfSSadaf Ebrahimi return true;
7944*b7893ccfSSadaf Ebrahimi }
7945*b7893ccfSSadaf Ebrahimi }
7946*b7893ccfSSadaf Ebrahimi }
7947*b7893ccfSSadaf Ebrahimi }
7948*b7893ccfSSadaf Ebrahimi
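// Fallback when other allocations may be made lost: brute-force scan of every suballocation that
// is either free or can become lost. Candidates are compared via VmaAllocationRequest::CalcCost()
// and the cheapest one wins (or the first acceptable one under FIRST_FIT); sumItemSize left at
// VK_WHOLE_SIZE means no candidate was found.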
7949*b7893ccfSSadaf Ebrahimi if(canMakeOtherLost)
7950*b7893ccfSSadaf Ebrahimi {
7951*b7893ccfSSadaf Ebrahimi // Brute-force algorithm. TODO: Come up with something better.
7952*b7893ccfSSadaf Ebrahimi
7953*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7954*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7955*b7893ccfSSadaf Ebrahimi
7956*b7893ccfSSadaf Ebrahimi VmaAllocationRequest tmpAllocRequest = {};
7957*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7958*b7893ccfSSadaf Ebrahimi suballocIt != m_Suballocations.end();
7959*b7893ccfSSadaf Ebrahimi ++suballocIt)
7960*b7893ccfSSadaf Ebrahimi {
7961*b7893ccfSSadaf Ebrahimi if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7962*b7893ccfSSadaf Ebrahimi suballocIt->hAllocation->CanBecomeLost())
7963*b7893ccfSSadaf Ebrahimi {
7964*b7893ccfSSadaf Ebrahimi if(CheckAllocation(
7965*b7893ccfSSadaf Ebrahimi currentFrameIndex,
7966*b7893ccfSSadaf Ebrahimi frameInUseCount,
7967*b7893ccfSSadaf Ebrahimi bufferImageGranularity,
7968*b7893ccfSSadaf Ebrahimi allocSize,
7969*b7893ccfSSadaf Ebrahimi allocAlignment,
7970*b7893ccfSSadaf Ebrahimi allocType,
7971*b7893ccfSSadaf Ebrahimi suballocIt,
7972*b7893ccfSSadaf Ebrahimi canMakeOtherLost,
7973*b7893ccfSSadaf Ebrahimi &tmpAllocRequest.offset,
7974*b7893ccfSSadaf Ebrahimi &tmpAllocRequest.itemsToMakeLostCount,
7975*b7893ccfSSadaf Ebrahimi &tmpAllocRequest.sumFreeSize,
7976*b7893ccfSSadaf Ebrahimi &tmpAllocRequest.sumItemSize))
7977*b7893ccfSSadaf Ebrahimi {
7978*b7893ccfSSadaf Ebrahimi tmpAllocRequest.item = suballocIt;
7979*b7893ccfSSadaf Ebrahimi
7980*b7893ccfSSadaf Ebrahimi if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7981*b7893ccfSSadaf Ebrahimi strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7982*b7893ccfSSadaf Ebrahimi {
7983*b7893ccfSSadaf Ebrahimi *pAllocationRequest = tmpAllocRequest;
7984*b7893ccfSSadaf Ebrahimi }
7985*b7893ccfSSadaf Ebrahimi }
7986*b7893ccfSSadaf Ebrahimi }
7987*b7893ccfSSadaf Ebrahimi }
7988*b7893ccfSSadaf Ebrahimi
7989*b7893ccfSSadaf Ebrahimi if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7990*b7893ccfSSadaf Ebrahimi {
7991*b7893ccfSSadaf Ebrahimi return true;
7992*b7893ccfSSadaf Ebrahimi }
7993*b7893ccfSSadaf Ebrahimi }
7994*b7893ccfSSadaf Ebrahimi
7995*b7893ccfSSadaf Ebrahimi return false;
7996*b7893ccfSSadaf Ebrahimi }
7997*b7893ccfSSadaf Ebrahimi
7998*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7999*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
8000*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
8001*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
8002*b7893ccfSSadaf Ebrahimi {
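// Walks forward from pAllocationRequest->item, making the allocations counted in
// itemsToMakeLostCount lost one by one. FreeSuballocation() turns each of them back into a free
// range, so on success the request's item ends up pointing at a free suballocation, as asserted
// at the end of this function.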
8003*b7893ccfSSadaf Ebrahimi while(pAllocationRequest->itemsToMakeLostCount > 0)
8004*b7893ccfSSadaf Ebrahimi {
8005*b7893ccfSSadaf Ebrahimi if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8006*b7893ccfSSadaf Ebrahimi {
8007*b7893ccfSSadaf Ebrahimi ++pAllocationRequest->item;
8008*b7893ccfSSadaf Ebrahimi }
8009*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8010*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8011*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8012*b7893ccfSSadaf Ebrahimi if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8013*b7893ccfSSadaf Ebrahimi {
8014*b7893ccfSSadaf Ebrahimi pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8015*b7893ccfSSadaf Ebrahimi --pAllocationRequest->itemsToMakeLostCount;
8016*b7893ccfSSadaf Ebrahimi }
8017*b7893ccfSSadaf Ebrahimi else
8018*b7893ccfSSadaf Ebrahimi {
8019*b7893ccfSSadaf Ebrahimi return false;
8020*b7893ccfSSadaf Ebrahimi }
8021*b7893ccfSSadaf Ebrahimi }
8022*b7893ccfSSadaf Ebrahimi
8023*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(Validate());
8024*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8025*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8026*b7893ccfSSadaf Ebrahimi
8027*b7893ccfSSadaf Ebrahimi return true;
8028*b7893ccfSSadaf Ebrahimi }
8029*b7893ccfSSadaf Ebrahimi
8030*b7893ccfSSadaf Ebrahimi uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8031*b7893ccfSSadaf Ebrahimi {
8032*b7893ccfSSadaf Ebrahimi uint32_t lostAllocationCount = 0;
8033*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8034*b7893ccfSSadaf Ebrahimi it != m_Suballocations.end();
8035*b7893ccfSSadaf Ebrahimi ++it)
8036*b7893ccfSSadaf Ebrahimi {
8037*b7893ccfSSadaf Ebrahimi if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8038*b7893ccfSSadaf Ebrahimi it->hAllocation->CanBecomeLost() &&
8039*b7893ccfSSadaf Ebrahimi it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8040*b7893ccfSSadaf Ebrahimi {
8041*b7893ccfSSadaf Ebrahimi it = FreeSuballocation(it);
8042*b7893ccfSSadaf Ebrahimi ++lostAllocationCount;
8043*b7893ccfSSadaf Ebrahimi }
8044*b7893ccfSSadaf Ebrahimi }
8045*b7893ccfSSadaf Ebrahimi return lostAllocationCount;
8046*b7893ccfSSadaf Ebrahimi }
8047*b7893ccfSSadaf Ebrahimi
8048*b7893ccfSSadaf Ebrahimi VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8049*b7893ccfSSadaf Ebrahimi {
8050*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8051*b7893ccfSSadaf Ebrahimi it != m_Suballocations.end();
8052*b7893ccfSSadaf Ebrahimi ++it)
8053*b7893ccfSSadaf Ebrahimi {
8054*b7893ccfSSadaf Ebrahimi if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8055*b7893ccfSSadaf Ebrahimi {
8056*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8057*b7893ccfSSadaf Ebrahimi {
8058*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8059*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
8060*b7893ccfSSadaf Ebrahimi }
8061*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8062*b7893ccfSSadaf Ebrahimi {
8063*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8064*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
8065*b7893ccfSSadaf Ebrahimi }
8066*b7893ccfSSadaf Ebrahimi }
8067*b7893ccfSSadaf Ebrahimi }
8068*b7893ccfSSadaf Ebrahimi
8069*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
8070*b7893ccfSSadaf Ebrahimi }
8071*b7893ccfSSadaf Ebrahimi
8072*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::Alloc(
8073*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
8074*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
8075*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
8076*b7893ccfSSadaf Ebrahimi bool upperAddress,
8077*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation)
8078*b7893ccfSSadaf Ebrahimi {
8079*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!upperAddress);
8080*b7893ccfSSadaf Ebrahimi VMA_ASSERT(request.item != m_Suballocations.end());
8081*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = *request.item;
8082*b7893ccfSSadaf Ebrahimi // Given suballocation is a free block.
8083*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8084*b7893ccfSSadaf Ebrahimi // Given offset is inside this suballocation.
8085*b7893ccfSSadaf Ebrahimi VMA_ASSERT(request.offset >= suballoc.offset);
8086*b7893ccfSSadaf Ebrahimi const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8087*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8088*b7893ccfSSadaf Ebrahimi const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8089*b7893ccfSSadaf Ebrahimi
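// The chosen free range is split into up to three pieces: optional free padding before the
// allocation (paddingBegin), the allocation itself, and optional free padding after it
// (paddingEnd). The padding pieces are re-inserted as free suballocations below and the
// free-count / free-size totals are adjusted accordingly.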
8090*b7893ccfSSadaf Ebrahimi // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8091*b7893ccfSSadaf Ebrahimi // it to become used.
8092*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(request.item);
8093*b7893ccfSSadaf Ebrahimi
8094*b7893ccfSSadaf Ebrahimi suballoc.offset = request.offset;
8095*b7893ccfSSadaf Ebrahimi suballoc.size = allocSize;
8096*b7893ccfSSadaf Ebrahimi suballoc.type = type;
8097*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = hAllocation;
8098*b7893ccfSSadaf Ebrahimi
8099*b7893ccfSSadaf Ebrahimi // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8100*b7893ccfSSadaf Ebrahimi if(paddingEnd)
8101*b7893ccfSSadaf Ebrahimi {
8102*b7893ccfSSadaf Ebrahimi VmaSuballocation paddingSuballoc = {};
8103*b7893ccfSSadaf Ebrahimi paddingSuballoc.offset = request.offset + allocSize;
8104*b7893ccfSSadaf Ebrahimi paddingSuballoc.size = paddingEnd;
8105*b7893ccfSSadaf Ebrahimi paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8106*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator next = request.item;
8107*b7893ccfSSadaf Ebrahimi ++next;
8108*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator paddingEndItem =
8109*b7893ccfSSadaf Ebrahimi m_Suballocations.insert(next, paddingSuballoc);
8110*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(paddingEndItem);
8111*b7893ccfSSadaf Ebrahimi }
8112*b7893ccfSSadaf Ebrahimi
8113*b7893ccfSSadaf Ebrahimi // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8114*b7893ccfSSadaf Ebrahimi if(paddingBegin)
8115*b7893ccfSSadaf Ebrahimi {
8116*b7893ccfSSadaf Ebrahimi VmaSuballocation paddingSuballoc = {};
8117*b7893ccfSSadaf Ebrahimi paddingSuballoc.offset = request.offset - paddingBegin;
8118*b7893ccfSSadaf Ebrahimi paddingSuballoc.size = paddingBegin;
8119*b7893ccfSSadaf Ebrahimi paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8120*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator paddingBeginItem =
8121*b7893ccfSSadaf Ebrahimi m_Suballocations.insert(request.item, paddingSuballoc);
8122*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(paddingBeginItem);
8123*b7893ccfSSadaf Ebrahimi }
8124*b7893ccfSSadaf Ebrahimi
8125*b7893ccfSSadaf Ebrahimi // Update totals.
8126*b7893ccfSSadaf Ebrahimi m_FreeCount = m_FreeCount - 1;
8127*b7893ccfSSadaf Ebrahimi if(paddingBegin > 0)
8128*b7893ccfSSadaf Ebrahimi {
8129*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
8130*b7893ccfSSadaf Ebrahimi }
8131*b7893ccfSSadaf Ebrahimi if(paddingEnd > 0)
8132*b7893ccfSSadaf Ebrahimi {
8133*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
8134*b7893ccfSSadaf Ebrahimi }
8135*b7893ccfSSadaf Ebrahimi m_SumFreeSize -= allocSize;
8136*b7893ccfSSadaf Ebrahimi }
8137*b7893ccfSSadaf Ebrahimi
8138*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8139*b7893ccfSSadaf Ebrahimi {
8140*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8141*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.end();
8142*b7893ccfSSadaf Ebrahimi ++suballocItem)
8143*b7893ccfSSadaf Ebrahimi {
8144*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = *suballocItem;
8145*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation == allocation)
8146*b7893ccfSSadaf Ebrahimi {
8147*b7893ccfSSadaf Ebrahimi FreeSuballocation(suballocItem);
8148*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(Validate());
8149*b7893ccfSSadaf Ebrahimi return;
8150*b7893ccfSSadaf Ebrahimi }
8151*b7893ccfSSadaf Ebrahimi }
8152*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Not found!");
8153*b7893ccfSSadaf Ebrahimi }
8154*b7893ccfSSadaf Ebrahimi
8155*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8156*b7893ccfSSadaf Ebrahimi {
8157*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8158*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.end();
8159*b7893ccfSSadaf Ebrahimi ++suballocItem)
8160*b7893ccfSSadaf Ebrahimi {
8161*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = *suballocItem;
8162*b7893ccfSSadaf Ebrahimi if(suballoc.offset == offset)
8163*b7893ccfSSadaf Ebrahimi {
8164*b7893ccfSSadaf Ebrahimi FreeSuballocation(suballocItem);
8165*b7893ccfSSadaf Ebrahimi return;
8166*b7893ccfSSadaf Ebrahimi }
8167*b7893ccfSSadaf Ebrahimi }
8168*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Not found!");
8169*b7893ccfSSadaf Ebrahimi }
8170*b7893ccfSSadaf Ebrahimi
8171*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8172*b7893ccfSSadaf Ebrahimi {
8173*b7893ccfSSadaf Ebrahimi typedef VmaSuballocationList::iterator iter_type;
8174*b7893ccfSSadaf Ebrahimi for(iter_type suballocItem = m_Suballocations.begin();
8175*b7893ccfSSadaf Ebrahimi suballocItem != m_Suballocations.end();
8176*b7893ccfSSadaf Ebrahimi ++suballocItem)
8177*b7893ccfSSadaf Ebrahimi {
8178*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = *suballocItem;
8179*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation == alloc)
8180*b7893ccfSSadaf Ebrahimi {
8181*b7893ccfSSadaf Ebrahimi iter_type nextItem = suballocItem;
8182*b7893ccfSSadaf Ebrahimi ++nextItem;
8183*b7893ccfSSadaf Ebrahimi
8184*b7893ccfSSadaf Ebrahimi // Should have been ensured on higher level.
8185*b7893ccfSSadaf Ebrahimi VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8186*b7893ccfSSadaf Ebrahimi
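// Shrinking returns the freed tail either by growing the following free range backward or by
// inserting a new free range right after this one. Growing is only possible when the following
// suballocation is free and large enough (keeping VMA_DEBUG_MARGIN); otherwise ResizeAllocation()
// returns false.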
8187*b7893ccfSSadaf Ebrahimi // Shrinking.
8188*b7893ccfSSadaf Ebrahimi if(newSize < alloc->GetSize())
8189*b7893ccfSSadaf Ebrahimi {
8190*b7893ccfSSadaf Ebrahimi const VkDeviceSize sizeDiff = suballoc.size - newSize;
8191*b7893ccfSSadaf Ebrahimi
8192*b7893ccfSSadaf Ebrahimi // There is next item.
8193*b7893ccfSSadaf Ebrahimi if(nextItem != m_Suballocations.end())
8194*b7893ccfSSadaf Ebrahimi {
8195*b7893ccfSSadaf Ebrahimi // Next item is free.
8196*b7893ccfSSadaf Ebrahimi if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8197*b7893ccfSSadaf Ebrahimi {
8198*b7893ccfSSadaf Ebrahimi // Grow this next item backward.
8199*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(nextItem);
8200*b7893ccfSSadaf Ebrahimi nextItem->offset -= sizeDiff;
8201*b7893ccfSSadaf Ebrahimi nextItem->size += sizeDiff;
8202*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(nextItem);
8203*b7893ccfSSadaf Ebrahimi }
8204*b7893ccfSSadaf Ebrahimi // Next item is not free.
8205*b7893ccfSSadaf Ebrahimi else
8206*b7893ccfSSadaf Ebrahimi {
8207*b7893ccfSSadaf Ebrahimi // Create free item after current one.
8208*b7893ccfSSadaf Ebrahimi VmaSuballocation newFreeSuballoc;
8209*b7893ccfSSadaf Ebrahimi newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8210*b7893ccfSSadaf Ebrahimi newFreeSuballoc.offset = suballoc.offset + newSize;
8211*b7893ccfSSadaf Ebrahimi newFreeSuballoc.size = sizeDiff;
8212*b7893ccfSSadaf Ebrahimi newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8213*b7893ccfSSadaf Ebrahimi iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8214*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(newFreeSuballocIt);
8215*b7893ccfSSadaf Ebrahimi
8216*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
8217*b7893ccfSSadaf Ebrahimi }
8218*b7893ccfSSadaf Ebrahimi }
8219*b7893ccfSSadaf Ebrahimi // This is the last item.
8220*b7893ccfSSadaf Ebrahimi else
8221*b7893ccfSSadaf Ebrahimi {
8222*b7893ccfSSadaf Ebrahimi // Create free item at the end.
8223*b7893ccfSSadaf Ebrahimi VmaSuballocation newFreeSuballoc;
8224*b7893ccfSSadaf Ebrahimi newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8225*b7893ccfSSadaf Ebrahimi newFreeSuballoc.offset = suballoc.offset + newSize;
8226*b7893ccfSSadaf Ebrahimi newFreeSuballoc.size = sizeDiff;
8227*b7893ccfSSadaf Ebrahimi newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8228*b7893ccfSSadaf Ebrahimi m_Suballocations.push_back(newFreeSuballoc);
8229*b7893ccfSSadaf Ebrahimi
8230*b7893ccfSSadaf Ebrahimi iter_type newFreeSuballocIt = m_Suballocations.end();
8231*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(--newFreeSuballocIt);
8232*b7893ccfSSadaf Ebrahimi
8233*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
8234*b7893ccfSSadaf Ebrahimi }
8235*b7893ccfSSadaf Ebrahimi
8236*b7893ccfSSadaf Ebrahimi suballoc.size = newSize;
8237*b7893ccfSSadaf Ebrahimi m_SumFreeSize += sizeDiff;
8238*b7893ccfSSadaf Ebrahimi }
8239*b7893ccfSSadaf Ebrahimi // Growing.
8240*b7893ccfSSadaf Ebrahimi else
8241*b7893ccfSSadaf Ebrahimi {
8242*b7893ccfSSadaf Ebrahimi const VkDeviceSize sizeDiff = newSize - suballoc.size;
8243*b7893ccfSSadaf Ebrahimi
8244*b7893ccfSSadaf Ebrahimi // There is next item.
8245*b7893ccfSSadaf Ebrahimi if(nextItem != m_Suballocations.end())
8246*b7893ccfSSadaf Ebrahimi {
8247*b7893ccfSSadaf Ebrahimi // Next item is free.
8248*b7893ccfSSadaf Ebrahimi if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8249*b7893ccfSSadaf Ebrahimi {
8250*b7893ccfSSadaf Ebrahimi // There is not enough free space, including margin.
8251*b7893ccfSSadaf Ebrahimi if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8252*b7893ccfSSadaf Ebrahimi {
8253*b7893ccfSSadaf Ebrahimi return false;
8254*b7893ccfSSadaf Ebrahimi }
8255*b7893ccfSSadaf Ebrahimi
8256*b7893ccfSSadaf Ebrahimi // There is more free space than required.
8257*b7893ccfSSadaf Ebrahimi if(nextItem->size > sizeDiff)
8258*b7893ccfSSadaf Ebrahimi {
8259*b7893ccfSSadaf Ebrahimi // Move and shrink this next item.
8260*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(nextItem);
8261*b7893ccfSSadaf Ebrahimi nextItem->offset += sizeDiff;
8262*b7893ccfSSadaf Ebrahimi nextItem->size -= sizeDiff;
8263*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(nextItem);
8264*b7893ccfSSadaf Ebrahimi }
8265*b7893ccfSSadaf Ebrahimi // There is exactly the amount of free space required.
8266*b7893ccfSSadaf Ebrahimi else
8267*b7893ccfSSadaf Ebrahimi {
8268*b7893ccfSSadaf Ebrahimi // Remove this next free item.
8269*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(nextItem);
8270*b7893ccfSSadaf Ebrahimi m_Suballocations.erase(nextItem);
8271*b7893ccfSSadaf Ebrahimi --m_FreeCount;
8272*b7893ccfSSadaf Ebrahimi }
8273*b7893ccfSSadaf Ebrahimi }
8274*b7893ccfSSadaf Ebrahimi // Next item is not free - there is no space to grow.
8275*b7893ccfSSadaf Ebrahimi else
8276*b7893ccfSSadaf Ebrahimi {
8277*b7893ccfSSadaf Ebrahimi return false;
8278*b7893ccfSSadaf Ebrahimi }
8279*b7893ccfSSadaf Ebrahimi }
8280*b7893ccfSSadaf Ebrahimi // This is the last item - there is no space to grow.
8281*b7893ccfSSadaf Ebrahimi else
8282*b7893ccfSSadaf Ebrahimi {
8283*b7893ccfSSadaf Ebrahimi return false;
8284*b7893ccfSSadaf Ebrahimi }
8285*b7893ccfSSadaf Ebrahimi
8286*b7893ccfSSadaf Ebrahimi suballoc.size = newSize;
8287*b7893ccfSSadaf Ebrahimi m_SumFreeSize -= sizeDiff;
8288*b7893ccfSSadaf Ebrahimi }
8289*b7893ccfSSadaf Ebrahimi
8290*b7893ccfSSadaf Ebrahimi // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8291*b7893ccfSSadaf Ebrahimi return true;
8292*b7893ccfSSadaf Ebrahimi }
8293*b7893ccfSSadaf Ebrahimi }
8294*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Not found!");
8295*b7893ccfSSadaf Ebrahimi return false;
8296*b7893ccfSSadaf Ebrahimi }
8297*b7893ccfSSadaf Ebrahimi
8298*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8299*b7893ccfSSadaf Ebrahimi {
8300*b7893ccfSSadaf Ebrahimi VkDeviceSize lastSize = 0;
8301*b7893ccfSSadaf Ebrahimi for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8302*b7893ccfSSadaf Ebrahimi {
8303*b7893ccfSSadaf Ebrahimi const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8304*b7893ccfSSadaf Ebrahimi
8305*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8306*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8307*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(it->size >= lastSize);
8308*b7893ccfSSadaf Ebrahimi lastSize = it->size;
8309*b7893ccfSSadaf Ebrahimi }
8310*b7893ccfSSadaf Ebrahimi return true;
8311*b7893ccfSSadaf Ebrahimi }
8312*b7893ccfSSadaf Ebrahimi
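// Tries to place an allocation of allocSize / allocAlignment starting at suballocItem.
// Applies VMA_DEBUG_MARGIN and bufferImageGranularity rules. When canMakeOtherLost is
// true, the request may span further suballocations and the allocations that would have
// to be made lost are counted in *itemsToMakeLostCount. On success fills *pOffset and
// the size statistics and returns true.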
8313*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::CheckAllocation(
8314*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
8315*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
8316*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
8317*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
8318*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
8319*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
8320*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator suballocItem,
8321*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
8322*b7893ccfSSadaf Ebrahimi VkDeviceSize* pOffset,
8323*b7893ccfSSadaf Ebrahimi size_t* itemsToMakeLostCount,
8324*b7893ccfSSadaf Ebrahimi VkDeviceSize* pSumFreeSize,
8325*b7893ccfSSadaf Ebrahimi VkDeviceSize* pSumItemSize) const
8326*b7893ccfSSadaf Ebrahimi {
8327*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocSize > 0);
8328*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8329*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballocItem != m_Suballocations.cend());
8330*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pOffset != VMA_NULL);
8331*b7893ccfSSadaf Ebrahimi
8332*b7893ccfSSadaf Ebrahimi *itemsToMakeLostCount = 0;
8333*b7893ccfSSadaf Ebrahimi *pSumFreeSize = 0;
8334*b7893ccfSSadaf Ebrahimi *pSumItemSize = 0;
8335*b7893ccfSSadaf Ebrahimi
8336*b7893ccfSSadaf Ebrahimi if(canMakeOtherLost)
8337*b7893ccfSSadaf Ebrahimi {
8338*b7893ccfSSadaf Ebrahimi if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8339*b7893ccfSSadaf Ebrahimi {
8340*b7893ccfSSadaf Ebrahimi *pSumFreeSize = suballocItem->size;
8341*b7893ccfSSadaf Ebrahimi }
8342*b7893ccfSSadaf Ebrahimi else
8343*b7893ccfSSadaf Ebrahimi {
8344*b7893ccfSSadaf Ebrahimi if(suballocItem->hAllocation->CanBecomeLost() &&
8345*b7893ccfSSadaf Ebrahimi suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8346*b7893ccfSSadaf Ebrahimi {
8347*b7893ccfSSadaf Ebrahimi ++*itemsToMakeLostCount;
8348*b7893ccfSSadaf Ebrahimi *pSumItemSize = suballocItem->size;
8349*b7893ccfSSadaf Ebrahimi }
8350*b7893ccfSSadaf Ebrahimi else
8351*b7893ccfSSadaf Ebrahimi {
8352*b7893ccfSSadaf Ebrahimi return false;
8353*b7893ccfSSadaf Ebrahimi }
8354*b7893ccfSSadaf Ebrahimi }
8355*b7893ccfSSadaf Ebrahimi
8356*b7893ccfSSadaf Ebrahimi // Remaining size is too small for this request: Early return.
8357*b7893ccfSSadaf Ebrahimi if(GetSize() - suballocItem->offset < allocSize)
8358*b7893ccfSSadaf Ebrahimi {
8359*b7893ccfSSadaf Ebrahimi return false;
8360*b7893ccfSSadaf Ebrahimi }
8361*b7893ccfSSadaf Ebrahimi
8362*b7893ccfSSadaf Ebrahimi // Start from offset equal to beginning of this suballocation.
8363*b7893ccfSSadaf Ebrahimi *pOffset = suballocItem->offset;
8364*b7893ccfSSadaf Ebrahimi
8365*b7893ccfSSadaf Ebrahimi // Apply VMA_DEBUG_MARGIN at the beginning.
8366*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN > 0)
8367*b7893ccfSSadaf Ebrahimi {
8368*b7893ccfSSadaf Ebrahimi *pOffset += VMA_DEBUG_MARGIN;
8369*b7893ccfSSadaf Ebrahimi }
8370*b7893ccfSSadaf Ebrahimi
8371*b7893ccfSSadaf Ebrahimi // Apply alignment.
8372*b7893ccfSSadaf Ebrahimi *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8373*b7893ccfSSadaf Ebrahimi
8374*b7893ccfSSadaf Ebrahimi // Check previous suballocations for BufferImageGranularity conflicts.
8375*b7893ccfSSadaf Ebrahimi // Make bigger alignment if necessary.
8376*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
8377*b7893ccfSSadaf Ebrahimi {
8378*b7893ccfSSadaf Ebrahimi bool bufferImageGranularityConflict = false;
8379*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8380*b7893ccfSSadaf Ebrahimi while(prevSuballocItem != m_Suballocations.cbegin())
8381*b7893ccfSSadaf Ebrahimi {
8382*b7893ccfSSadaf Ebrahimi --prevSuballocItem;
8383*b7893ccfSSadaf Ebrahimi const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8384*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8385*b7893ccfSSadaf Ebrahimi {
8386*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8387*b7893ccfSSadaf Ebrahimi {
8388*b7893ccfSSadaf Ebrahimi bufferImageGranularityConflict = true;
8389*b7893ccfSSadaf Ebrahimi break;
8390*b7893ccfSSadaf Ebrahimi }
8391*b7893ccfSSadaf Ebrahimi }
8392*b7893ccfSSadaf Ebrahimi else
8393*b7893ccfSSadaf Ebrahimi // Already on previous page.
8394*b7893ccfSSadaf Ebrahimi break;
8395*b7893ccfSSadaf Ebrahimi }
8396*b7893ccfSSadaf Ebrahimi if(bufferImageGranularityConflict)
8397*b7893ccfSSadaf Ebrahimi {
8398*b7893ccfSSadaf Ebrahimi *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8399*b7893ccfSSadaf Ebrahimi }
8400*b7893ccfSSadaf Ebrahimi }
8401*b7893ccfSSadaf Ebrahimi
8402*b7893ccfSSadaf Ebrahimi // Now that we have final *pOffset, check if we are past suballocItem.
8403*b7893ccfSSadaf Ebrahimi // If yes, return false - this function should be called for another suballocItem as starting point.
8404*b7893ccfSSadaf Ebrahimi if(*pOffset >= suballocItem->offset + suballocItem->size)
8405*b7893ccfSSadaf Ebrahimi {
8406*b7893ccfSSadaf Ebrahimi return false;
8407*b7893ccfSSadaf Ebrahimi }
8408*b7893ccfSSadaf Ebrahimi
8409*b7893ccfSSadaf Ebrahimi // Calculate padding at the beginning based on current offset.
8410*b7893ccfSSadaf Ebrahimi const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8411*b7893ccfSSadaf Ebrahimi
8412*b7893ccfSSadaf Ebrahimi // Calculate required margin at the end.
8413*b7893ccfSSadaf Ebrahimi const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8414*b7893ccfSSadaf Ebrahimi
8415*b7893ccfSSadaf Ebrahimi const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8416*b7893ccfSSadaf Ebrahimi // Another early return check.
8417*b7893ccfSSadaf Ebrahimi if(suballocItem->offset + totalSize > GetSize())
8418*b7893ccfSSadaf Ebrahimi {
8419*b7893ccfSSadaf Ebrahimi return false;
8420*b7893ccfSSadaf Ebrahimi }
8421*b7893ccfSSadaf Ebrahimi
8422*b7893ccfSSadaf Ebrahimi // Advance lastSuballocItem until desired size is reached.
8423*b7893ccfSSadaf Ebrahimi // Update itemsToMakeLostCount.
8424*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8425*b7893ccfSSadaf Ebrahimi if(totalSize > suballocItem->size)
8426*b7893ccfSSadaf Ebrahimi {
8427*b7893ccfSSadaf Ebrahimi VkDeviceSize remainingSize = totalSize - suballocItem->size;
8428*b7893ccfSSadaf Ebrahimi while(remainingSize > 0)
8429*b7893ccfSSadaf Ebrahimi {
8430*b7893ccfSSadaf Ebrahimi ++lastSuballocItem;
8431*b7893ccfSSadaf Ebrahimi if(lastSuballocItem == m_Suballocations.cend())
8432*b7893ccfSSadaf Ebrahimi {
8433*b7893ccfSSadaf Ebrahimi return false;
8434*b7893ccfSSadaf Ebrahimi }
8435*b7893ccfSSadaf Ebrahimi if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8436*b7893ccfSSadaf Ebrahimi {
8437*b7893ccfSSadaf Ebrahimi *pSumFreeSize += lastSuballocItem->size;
8438*b7893ccfSSadaf Ebrahimi }
8439*b7893ccfSSadaf Ebrahimi else
8440*b7893ccfSSadaf Ebrahimi {
8441*b7893ccfSSadaf Ebrahimi VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8442*b7893ccfSSadaf Ebrahimi if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8443*b7893ccfSSadaf Ebrahimi lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8444*b7893ccfSSadaf Ebrahimi {
8445*b7893ccfSSadaf Ebrahimi ++*itemsToMakeLostCount;
8446*b7893ccfSSadaf Ebrahimi *pSumItemSize += lastSuballocItem->size;
8447*b7893ccfSSadaf Ebrahimi }
8448*b7893ccfSSadaf Ebrahimi else
8449*b7893ccfSSadaf Ebrahimi {
8450*b7893ccfSSadaf Ebrahimi return false;
8451*b7893ccfSSadaf Ebrahimi }
8452*b7893ccfSSadaf Ebrahimi }
8453*b7893ccfSSadaf Ebrahimi remainingSize = (lastSuballocItem->size < remainingSize) ?
8454*b7893ccfSSadaf Ebrahimi remainingSize - lastSuballocItem->size : 0;
8455*b7893ccfSSadaf Ebrahimi }
8456*b7893ccfSSadaf Ebrahimi }
8457*b7893ccfSSadaf Ebrahimi
8458*b7893ccfSSadaf Ebrahimi // Check next suballocations for BufferImageGranularity conflicts.
8459*b7893ccfSSadaf Ebrahimi // If conflict exists, we must mark more allocations lost or fail.
8460*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
8461*b7893ccfSSadaf Ebrahimi {
8462*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8463*b7893ccfSSadaf Ebrahimi ++nextSuballocItem;
8464*b7893ccfSSadaf Ebrahimi while(nextSuballocItem != m_Suballocations.cend())
8465*b7893ccfSSadaf Ebrahimi {
8466*b7893ccfSSadaf Ebrahimi const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8467*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8468*b7893ccfSSadaf Ebrahimi {
8469*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8470*b7893ccfSSadaf Ebrahimi {
8471*b7893ccfSSadaf Ebrahimi VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8472*b7893ccfSSadaf Ebrahimi if(nextSuballoc.hAllocation->CanBecomeLost() &&
8473*b7893ccfSSadaf Ebrahimi nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8474*b7893ccfSSadaf Ebrahimi {
8475*b7893ccfSSadaf Ebrahimi ++*itemsToMakeLostCount;
8476*b7893ccfSSadaf Ebrahimi }
8477*b7893ccfSSadaf Ebrahimi else
8478*b7893ccfSSadaf Ebrahimi {
8479*b7893ccfSSadaf Ebrahimi return false;
8480*b7893ccfSSadaf Ebrahimi }
8481*b7893ccfSSadaf Ebrahimi }
8482*b7893ccfSSadaf Ebrahimi }
8483*b7893ccfSSadaf Ebrahimi else
8484*b7893ccfSSadaf Ebrahimi {
8485*b7893ccfSSadaf Ebrahimi // Already on next page.
8486*b7893ccfSSadaf Ebrahimi break;
8487*b7893ccfSSadaf Ebrahimi }
8488*b7893ccfSSadaf Ebrahimi ++nextSuballocItem;
8489*b7893ccfSSadaf Ebrahimi }
8490*b7893ccfSSadaf Ebrahimi }
8491*b7893ccfSSadaf Ebrahimi }
8492*b7893ccfSSadaf Ebrahimi else
8493*b7893ccfSSadaf Ebrahimi {
8494*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = *suballocItem;
8495*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8496*b7893ccfSSadaf Ebrahimi
8497*b7893ccfSSadaf Ebrahimi *pSumFreeSize = suballoc.size;
8498*b7893ccfSSadaf Ebrahimi
8499*b7893ccfSSadaf Ebrahimi // Size of this suballocation is too small for this request: Early return.
8500*b7893ccfSSadaf Ebrahimi if(suballoc.size < allocSize)
8501*b7893ccfSSadaf Ebrahimi {
8502*b7893ccfSSadaf Ebrahimi return false;
8503*b7893ccfSSadaf Ebrahimi }
8504*b7893ccfSSadaf Ebrahimi
8505*b7893ccfSSadaf Ebrahimi // Start from offset equal to beginning of this suballocation.
8506*b7893ccfSSadaf Ebrahimi *pOffset = suballoc.offset;
8507*b7893ccfSSadaf Ebrahimi
8508*b7893ccfSSadaf Ebrahimi // Apply VMA_DEBUG_MARGIN at the beginning.
8509*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN > 0)
8510*b7893ccfSSadaf Ebrahimi {
8511*b7893ccfSSadaf Ebrahimi *pOffset += VMA_DEBUG_MARGIN;
8512*b7893ccfSSadaf Ebrahimi }
8513*b7893ccfSSadaf Ebrahimi
8514*b7893ccfSSadaf Ebrahimi // Apply alignment.
8515*b7893ccfSSadaf Ebrahimi *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8516*b7893ccfSSadaf Ebrahimi
8517*b7893ccfSSadaf Ebrahimi // Check previous suballocations for BufferImageGranularity conflicts.
8518*b7893ccfSSadaf Ebrahimi // Make bigger alignment if necessary.
8519*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
8520*b7893ccfSSadaf Ebrahimi {
8521*b7893ccfSSadaf Ebrahimi bool bufferImageGranularityConflict = false;
8522*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8523*b7893ccfSSadaf Ebrahimi while(prevSuballocItem != m_Suballocations.cbegin())
8524*b7893ccfSSadaf Ebrahimi {
8525*b7893ccfSSadaf Ebrahimi --prevSuballocItem;
8526*b7893ccfSSadaf Ebrahimi const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8527*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8528*b7893ccfSSadaf Ebrahimi {
8529*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8530*b7893ccfSSadaf Ebrahimi {
8531*b7893ccfSSadaf Ebrahimi bufferImageGranularityConflict = true;
8532*b7893ccfSSadaf Ebrahimi break;
8533*b7893ccfSSadaf Ebrahimi }
8534*b7893ccfSSadaf Ebrahimi }
8535*b7893ccfSSadaf Ebrahimi else
8536*b7893ccfSSadaf Ebrahimi // Already on previous page.
8537*b7893ccfSSadaf Ebrahimi break;
8538*b7893ccfSSadaf Ebrahimi }
8539*b7893ccfSSadaf Ebrahimi if(bufferImageGranularityConflict)
8540*b7893ccfSSadaf Ebrahimi {
8541*b7893ccfSSadaf Ebrahimi *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8542*b7893ccfSSadaf Ebrahimi }
8543*b7893ccfSSadaf Ebrahimi }
8544*b7893ccfSSadaf Ebrahimi
8545*b7893ccfSSadaf Ebrahimi // Calculate padding at the beginning based on current offset.
8546*b7893ccfSSadaf Ebrahimi const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8547*b7893ccfSSadaf Ebrahimi
8548*b7893ccfSSadaf Ebrahimi // Calculate required margin at the end.
8549*b7893ccfSSadaf Ebrahimi const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8550*b7893ccfSSadaf Ebrahimi
8551*b7893ccfSSadaf Ebrahimi // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8552*b7893ccfSSadaf Ebrahimi if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8553*b7893ccfSSadaf Ebrahimi {
8554*b7893ccfSSadaf Ebrahimi return false;
8555*b7893ccfSSadaf Ebrahimi }
8556*b7893ccfSSadaf Ebrahimi
8557*b7893ccfSSadaf Ebrahimi // Check next suballocations for BufferImageGranularity conflicts.
8558*b7893ccfSSadaf Ebrahimi // If conflict exists, allocation cannot be made here.
8559*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
8560*b7893ccfSSadaf Ebrahimi {
8561*b7893ccfSSadaf Ebrahimi VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8562*b7893ccfSSadaf Ebrahimi ++nextSuballocItem;
8563*b7893ccfSSadaf Ebrahimi while(nextSuballocItem != m_Suballocations.cend())
8564*b7893ccfSSadaf Ebrahimi {
8565*b7893ccfSSadaf Ebrahimi const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8566*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8567*b7893ccfSSadaf Ebrahimi {
8568*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8569*b7893ccfSSadaf Ebrahimi {
8570*b7893ccfSSadaf Ebrahimi return false;
8571*b7893ccfSSadaf Ebrahimi }
8572*b7893ccfSSadaf Ebrahimi }
8573*b7893ccfSSadaf Ebrahimi else
8574*b7893ccfSSadaf Ebrahimi {
8575*b7893ccfSSadaf Ebrahimi // Already on next page.
8576*b7893ccfSSadaf Ebrahimi break;
8577*b7893ccfSSadaf Ebrahimi }
8578*b7893ccfSSadaf Ebrahimi ++nextSuballocItem;
8579*b7893ccfSSadaf Ebrahimi }
8580*b7893ccfSSadaf Ebrahimi }
8581*b7893ccfSSadaf Ebrahimi }
8582*b7893ccfSSadaf Ebrahimi
8583*b7893ccfSSadaf Ebrahimi // All tests passed: Success. pOffset is already filled.
8584*b7893ccfSSadaf Ebrahimi return true;
8585*b7893ccfSSadaf Ebrahimi }
8586*b7893ccfSSadaf Ebrahimi
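// Merges the free suballocation that follows `item` into `item` and erases it.
// Both suballocations must be free; m_FreeCount decreases by one, m_SumFreeSize is unchanged.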
8587*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8588*b7893ccfSSadaf Ebrahimi {
8589*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item != m_Suballocations.end());
8590*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8591*b7893ccfSSadaf Ebrahimi
8592*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextItem = item;
8593*b7893ccfSSadaf Ebrahimi ++nextItem;
8594*b7893ccfSSadaf Ebrahimi VMA_ASSERT(nextItem != m_Suballocations.end());
8595*b7893ccfSSadaf Ebrahimi VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8596*b7893ccfSSadaf Ebrahimi
8597*b7893ccfSSadaf Ebrahimi item->size += nextItem->size;
8598*b7893ccfSSadaf Ebrahimi --m_FreeCount;
8599*b7893ccfSSadaf Ebrahimi m_Suballocations.erase(nextItem);
8600*b7893ccfSSadaf Ebrahimi }
8601*b7893ccfSSadaf Ebrahimi
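// Marks the given suballocation as free, updates m_FreeCount and m_SumFreeSize,
// coalesces it with neighboring free suballocations, and registers the resulting free
// item in m_FreeSuballocationsBySize. Returns an iterator to that free item.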
8602*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8603*b7893ccfSSadaf Ebrahimi {
8604*b7893ccfSSadaf Ebrahimi // Change this suballocation to be marked as free.
8605*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = *suballocItem;
8606*b7893ccfSSadaf Ebrahimi suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8607*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = VK_NULL_HANDLE;
8608*b7893ccfSSadaf Ebrahimi
8609*b7893ccfSSadaf Ebrahimi // Update totals.
8610*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
8611*b7893ccfSSadaf Ebrahimi m_SumFreeSize += suballoc.size;
8612*b7893ccfSSadaf Ebrahimi
8613*b7893ccfSSadaf Ebrahimi // Merge with previous and/or next suballocation if it's also free.
8614*b7893ccfSSadaf Ebrahimi bool mergeWithNext = false;
8615*b7893ccfSSadaf Ebrahimi bool mergeWithPrev = false;
8616*b7893ccfSSadaf Ebrahimi
8617*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextItem = suballocItem;
8618*b7893ccfSSadaf Ebrahimi ++nextItem;
8619*b7893ccfSSadaf Ebrahimi if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8620*b7893ccfSSadaf Ebrahimi {
8621*b7893ccfSSadaf Ebrahimi mergeWithNext = true;
8622*b7893ccfSSadaf Ebrahimi }
8623*b7893ccfSSadaf Ebrahimi
8624*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator prevItem = suballocItem;
8625*b7893ccfSSadaf Ebrahimi if(suballocItem != m_Suballocations.begin())
8626*b7893ccfSSadaf Ebrahimi {
8627*b7893ccfSSadaf Ebrahimi --prevItem;
8628*b7893ccfSSadaf Ebrahimi if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8629*b7893ccfSSadaf Ebrahimi {
8630*b7893ccfSSadaf Ebrahimi mergeWithPrev = true;
8631*b7893ccfSSadaf Ebrahimi }
8632*b7893ccfSSadaf Ebrahimi }
8633*b7893ccfSSadaf Ebrahimi
8634*b7893ccfSSadaf Ebrahimi if(mergeWithNext)
8635*b7893ccfSSadaf Ebrahimi {
8636*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(nextItem);
8637*b7893ccfSSadaf Ebrahimi MergeFreeWithNext(suballocItem);
8638*b7893ccfSSadaf Ebrahimi }
8639*b7893ccfSSadaf Ebrahimi
8640*b7893ccfSSadaf Ebrahimi if(mergeWithPrev)
8641*b7893ccfSSadaf Ebrahimi {
8642*b7893ccfSSadaf Ebrahimi UnregisterFreeSuballocation(prevItem);
8643*b7893ccfSSadaf Ebrahimi MergeFreeWithNext(prevItem);
8644*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(prevItem);
8645*b7893ccfSSadaf Ebrahimi return prevItem;
8646*b7893ccfSSadaf Ebrahimi }
8647*b7893ccfSSadaf Ebrahimi else
8648*b7893ccfSSadaf Ebrahimi {
8649*b7893ccfSSadaf Ebrahimi RegisterFreeSuballocation(suballocItem);
8650*b7893ccfSSadaf Ebrahimi return suballocItem;
8651*b7893ccfSSadaf Ebrahimi }
8652*b7893ccfSSadaf Ebrahimi }
8653*b7893ccfSSadaf Ebrahimi
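// Adds a free suballocation to m_FreeSuballocationsBySize, keeping the vector sorted by
// size, but only if the suballocation is large enough to be worth registering.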
8654*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8655*b7893ccfSSadaf Ebrahimi {
8656*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8657*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item->size > 0);
8658*b7893ccfSSadaf Ebrahimi
8659*b7893ccfSSadaf Ebrahimi // You may want to enable this validation at the beginning or at the end of
8660*b7893ccfSSadaf Ebrahimi     // this function, depending on what you want to check.
8661*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8662*b7893ccfSSadaf Ebrahimi
8663*b7893ccfSSadaf Ebrahimi if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8664*b7893ccfSSadaf Ebrahimi {
8665*b7893ccfSSadaf Ebrahimi if(m_FreeSuballocationsBySize.empty())
8666*b7893ccfSSadaf Ebrahimi {
8667*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.push_back(item);
8668*b7893ccfSSadaf Ebrahimi }
8669*b7893ccfSSadaf Ebrahimi else
8670*b7893ccfSSadaf Ebrahimi {
8671*b7893ccfSSadaf Ebrahimi VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8672*b7893ccfSSadaf Ebrahimi }
8673*b7893ccfSSadaf Ebrahimi }
8674*b7893ccfSSadaf Ebrahimi
8675*b7893ccfSSadaf Ebrahimi //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8676*b7893ccfSSadaf Ebrahimi }
8677*b7893ccfSSadaf Ebrahimi
8678*b7893ccfSSadaf Ebrahimi
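// Removes a free suballocation from m_FreeSuballocationsBySize. Binary-searches for the
// first entry of equal or greater size, then scans forward through equally sized entries
// until the exact iterator is found.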
8679*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8680*b7893ccfSSadaf Ebrahimi {
8681*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8682*b7893ccfSSadaf Ebrahimi VMA_ASSERT(item->size > 0);
8683*b7893ccfSSadaf Ebrahimi
8684*b7893ccfSSadaf Ebrahimi // You may want to enable this validation at the beginning or at the end of
8685*b7893ccfSSadaf Ebrahimi     // this function, depending on what you want to check.
8686*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8687*b7893ccfSSadaf Ebrahimi
8688*b7893ccfSSadaf Ebrahimi if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8689*b7893ccfSSadaf Ebrahimi {
8690*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8691*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.data(),
8692*b7893ccfSSadaf Ebrahimi m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8693*b7893ccfSSadaf Ebrahimi item,
8694*b7893ccfSSadaf Ebrahimi VmaSuballocationItemSizeLess());
8695*b7893ccfSSadaf Ebrahimi for(size_t index = it - m_FreeSuballocationsBySize.data();
8696*b7893ccfSSadaf Ebrahimi index < m_FreeSuballocationsBySize.size();
8697*b7893ccfSSadaf Ebrahimi ++index)
8698*b7893ccfSSadaf Ebrahimi {
8699*b7893ccfSSadaf Ebrahimi if(m_FreeSuballocationsBySize[index] == item)
8700*b7893ccfSSadaf Ebrahimi {
8701*b7893ccfSSadaf Ebrahimi VmaVectorRemove(m_FreeSuballocationsBySize, index);
8702*b7893ccfSSadaf Ebrahimi return;
8703*b7893ccfSSadaf Ebrahimi }
8704*b7893ccfSSadaf Ebrahimi VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8705*b7893ccfSSadaf Ebrahimi }
8706*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Not found.");
8707*b7893ccfSSadaf Ebrahimi }
8708*b7893ccfSSadaf Ebrahimi
8709*b7893ccfSSadaf Ebrahimi //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8710*b7893ccfSSadaf Ebrahimi }
8711*b7893ccfSSadaf Ebrahimi
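// Heuristic check for whether bufferImageGranularity conflicts still have to be taken
// into account for this block: scans the existing suballocations, tracking their minimum
// alignment and whether neighboring used suballocations already have conflicting types.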
8712*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8713*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
8714*b7893ccfSSadaf Ebrahimi VmaSuballocationType& inOutPrevSuballocType) const
8715*b7893ccfSSadaf Ebrahimi {
8716*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity == 1 || IsEmpty())
8717*b7893ccfSSadaf Ebrahimi {
8718*b7893ccfSSadaf Ebrahimi return false;
8719*b7893ccfSSadaf Ebrahimi }
8720*b7893ccfSSadaf Ebrahimi
8721*b7893ccfSSadaf Ebrahimi VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8722*b7893ccfSSadaf Ebrahimi bool typeConflictFound = false;
8723*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8724*b7893ccfSSadaf Ebrahimi it != m_Suballocations.cend();
8725*b7893ccfSSadaf Ebrahimi ++it)
8726*b7893ccfSSadaf Ebrahimi {
8727*b7893ccfSSadaf Ebrahimi const VmaSuballocationType suballocType = it->type;
8728*b7893ccfSSadaf Ebrahimi if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8729*b7893ccfSSadaf Ebrahimi {
8730*b7893ccfSSadaf Ebrahimi minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8731*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8732*b7893ccfSSadaf Ebrahimi {
8733*b7893ccfSSadaf Ebrahimi typeConflictFound = true;
8734*b7893ccfSSadaf Ebrahimi }
8735*b7893ccfSSadaf Ebrahimi inOutPrevSuballocType = suballocType;
8736*b7893ccfSSadaf Ebrahimi }
8737*b7893ccfSSadaf Ebrahimi }
8738*b7893ccfSSadaf Ebrahimi
8739*b7893ccfSSadaf Ebrahimi return typeConflictFound || minAlignment >= bufferImageGranularity;
8740*b7893ccfSSadaf Ebrahimi }
8741*b7893ccfSSadaf Ebrahimi
8742*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
8743*b7893ccfSSadaf Ebrahimi // class VmaBlockMetadata_Linear
8744*b7893ccfSSadaf Ebrahimi
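// VmaBlockMetadata_Linear keeps its suballocations in two vectors (m_Suballocations0/1).
// m_1stVectorIndex selects which of them currently acts as the "1st" vector; the other
// one serves as the "2nd" vector, whose role is described by m_2ndVectorMode.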
8745*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8746*b7893ccfSSadaf Ebrahimi VmaBlockMetadata(hAllocator),
8747*b7893ccfSSadaf Ebrahimi m_SumFreeSize(0),
8748*b7893ccfSSadaf Ebrahimi m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8749*b7893ccfSSadaf Ebrahimi m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8750*b7893ccfSSadaf Ebrahimi m_1stVectorIndex(0),
8751*b7893ccfSSadaf Ebrahimi m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8752*b7893ccfSSadaf Ebrahimi m_1stNullItemsBeginCount(0),
8753*b7893ccfSSadaf Ebrahimi m_1stNullItemsMiddleCount(0),
8754*b7893ccfSSadaf Ebrahimi m_2ndNullItemsCount(0)
8755*b7893ccfSSadaf Ebrahimi {
8756*b7893ccfSSadaf Ebrahimi }
8757*b7893ccfSSadaf Ebrahimi
8758*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8759*b7893ccfSSadaf Ebrahimi {
8760*b7893ccfSSadaf Ebrahimi }
8761*b7893ccfSSadaf Ebrahimi
8762*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8763*b7893ccfSSadaf Ebrahimi {
8764*b7893ccfSSadaf Ebrahimi VmaBlockMetadata::Init(size);
8765*b7893ccfSSadaf Ebrahimi m_SumFreeSize = size;
8766*b7893ccfSSadaf Ebrahimi }
8767*b7893ccfSSadaf Ebrahimi
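// Validates the internal invariants of the linear metadata: the null-item counters match
// the actual contents of both vectors, no null items remain at the vector ends, offsets
// grow monotonically with the debug margin, and m_SumFreeSize equals block size minus used bytes.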
8768*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Linear::Validate() const
8769*b7893ccfSSadaf Ebrahimi {
8770*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8771*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8772*b7893ccfSSadaf Ebrahimi
8773*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8774*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(!suballocations1st.empty() ||
8775*b7893ccfSSadaf Ebrahimi suballocations2nd.empty() ||
8776*b7893ccfSSadaf Ebrahimi m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8777*b7893ccfSSadaf Ebrahimi
8778*b7893ccfSSadaf Ebrahimi if(!suballocations1st.empty())
8779*b7893ccfSSadaf Ebrahimi {
8780*b7893ccfSSadaf Ebrahimi         // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8781*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8782*b7893ccfSSadaf Ebrahimi         // A null item at the end should have been removed by pop_back().
8783*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8784*b7893ccfSSadaf Ebrahimi }
8785*b7893ccfSSadaf Ebrahimi if(!suballocations2nd.empty())
8786*b7893ccfSSadaf Ebrahimi {
8787*b7893ccfSSadaf Ebrahimi         // A null item at the end should have been removed by pop_back().
8788*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8789*b7893ccfSSadaf Ebrahimi }
8790*b7893ccfSSadaf Ebrahimi
8791*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8792*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8793*b7893ccfSSadaf Ebrahimi
8794*b7893ccfSSadaf Ebrahimi VkDeviceSize sumUsedSize = 0;
8795*b7893ccfSSadaf Ebrahimi const size_t suballoc1stCount = suballocations1st.size();
8796*b7893ccfSSadaf Ebrahimi VkDeviceSize offset = VMA_DEBUG_MARGIN;
8797*b7893ccfSSadaf Ebrahimi
8798*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8799*b7893ccfSSadaf Ebrahimi {
8800*b7893ccfSSadaf Ebrahimi const size_t suballoc2ndCount = suballocations2nd.size();
8801*b7893ccfSSadaf Ebrahimi size_t nullItem2ndCount = 0;
8802*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < suballoc2ndCount; ++i)
8803*b7893ccfSSadaf Ebrahimi {
8804*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[i];
8805*b7893ccfSSadaf Ebrahimi const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8806*b7893ccfSSadaf Ebrahimi
8807*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8808*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.offset >= offset);
8809*b7893ccfSSadaf Ebrahimi
8810*b7893ccfSSadaf Ebrahimi if(!currFree)
8811*b7893ccfSSadaf Ebrahimi {
8812*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8813*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8814*b7893ccfSSadaf Ebrahimi sumUsedSize += suballoc.size;
8815*b7893ccfSSadaf Ebrahimi }
8816*b7893ccfSSadaf Ebrahimi else
8817*b7893ccfSSadaf Ebrahimi {
8818*b7893ccfSSadaf Ebrahimi ++nullItem2ndCount;
8819*b7893ccfSSadaf Ebrahimi }
8820*b7893ccfSSadaf Ebrahimi
8821*b7893ccfSSadaf Ebrahimi offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8822*b7893ccfSSadaf Ebrahimi }
8823*b7893ccfSSadaf Ebrahimi
8824*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8825*b7893ccfSSadaf Ebrahimi }
8826*b7893ccfSSadaf Ebrahimi
8827*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8828*b7893ccfSSadaf Ebrahimi {
8829*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[i];
8830*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8831*b7893ccfSSadaf Ebrahimi suballoc.hAllocation == VK_NULL_HANDLE);
8832*b7893ccfSSadaf Ebrahimi }
8833*b7893ccfSSadaf Ebrahimi
8834*b7893ccfSSadaf Ebrahimi size_t nullItem1stCount = m_1stNullItemsBeginCount;
8835*b7893ccfSSadaf Ebrahimi
8836*b7893ccfSSadaf Ebrahimi for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8837*b7893ccfSSadaf Ebrahimi {
8838*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[i];
8839*b7893ccfSSadaf Ebrahimi const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8840*b7893ccfSSadaf Ebrahimi
8841*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8842*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.offset >= offset);
8843*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8844*b7893ccfSSadaf Ebrahimi
8845*b7893ccfSSadaf Ebrahimi if(!currFree)
8846*b7893ccfSSadaf Ebrahimi {
8847*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8848*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8849*b7893ccfSSadaf Ebrahimi sumUsedSize += suballoc.size;
8850*b7893ccfSSadaf Ebrahimi }
8851*b7893ccfSSadaf Ebrahimi else
8852*b7893ccfSSadaf Ebrahimi {
8853*b7893ccfSSadaf Ebrahimi ++nullItem1stCount;
8854*b7893ccfSSadaf Ebrahimi }
8855*b7893ccfSSadaf Ebrahimi
8856*b7893ccfSSadaf Ebrahimi offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8857*b7893ccfSSadaf Ebrahimi }
8858*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8859*b7893ccfSSadaf Ebrahimi
8860*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8861*b7893ccfSSadaf Ebrahimi {
8862*b7893ccfSSadaf Ebrahimi const size_t suballoc2ndCount = suballocations2nd.size();
8863*b7893ccfSSadaf Ebrahimi size_t nullItem2ndCount = 0;
8864*b7893ccfSSadaf Ebrahimi for(size_t i = suballoc2ndCount; i--; )
8865*b7893ccfSSadaf Ebrahimi {
8866*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[i];
8867*b7893ccfSSadaf Ebrahimi const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8868*b7893ccfSSadaf Ebrahimi
8869*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8870*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.offset >= offset);
8871*b7893ccfSSadaf Ebrahimi
8872*b7893ccfSSadaf Ebrahimi if(!currFree)
8873*b7893ccfSSadaf Ebrahimi {
8874*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8875*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8876*b7893ccfSSadaf Ebrahimi sumUsedSize += suballoc.size;
8877*b7893ccfSSadaf Ebrahimi }
8878*b7893ccfSSadaf Ebrahimi else
8879*b7893ccfSSadaf Ebrahimi {
8880*b7893ccfSSadaf Ebrahimi ++nullItem2ndCount;
8881*b7893ccfSSadaf Ebrahimi }
8882*b7893ccfSSadaf Ebrahimi
8883*b7893ccfSSadaf Ebrahimi offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8884*b7893ccfSSadaf Ebrahimi }
8885*b7893ccfSSadaf Ebrahimi
8886*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8887*b7893ccfSSadaf Ebrahimi }
8888*b7893ccfSSadaf Ebrahimi
8889*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(offset <= GetSize());
8890*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8891*b7893ccfSSadaf Ebrahimi
8892*b7893ccfSSadaf Ebrahimi return true;
8893*b7893ccfSSadaf Ebrahimi }
8894*b7893ccfSSadaf Ebrahimi
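// Live allocations are all items in both vectors minus the null (already freed) placeholders.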
8895*b7893ccfSSadaf Ebrahimi size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8896*b7893ccfSSadaf Ebrahimi {
8897*b7893ccfSSadaf Ebrahimi return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8898*b7893ccfSSadaf Ebrahimi AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8899*b7893ccfSSadaf Ebrahimi }
8900*b7893ccfSSadaf Ebrahimi
8901*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8902*b7893ccfSSadaf Ebrahimi {
8903*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = GetSize();
8904*b7893ccfSSadaf Ebrahimi
8905*b7893ccfSSadaf Ebrahimi /*
8906*b7893ccfSSadaf Ebrahimi We don't consider gaps inside allocation vectors with freed allocations because
8907*b7893ccfSSadaf Ebrahimi     they are not suitable for reuse in the linear allocator. We consider only space that
8908*b7893ccfSSadaf Ebrahimi is available for new allocations.
8909*b7893ccfSSadaf Ebrahimi */
8910*b7893ccfSSadaf Ebrahimi if(IsEmpty())
8911*b7893ccfSSadaf Ebrahimi {
8912*b7893ccfSSadaf Ebrahimi return size;
8913*b7893ccfSSadaf Ebrahimi }
8914*b7893ccfSSadaf Ebrahimi
8915*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8916*b7893ccfSSadaf Ebrahimi
8917*b7893ccfSSadaf Ebrahimi switch(m_2ndVectorMode)
8918*b7893ccfSSadaf Ebrahimi {
8919*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_EMPTY:
8920*b7893ccfSSadaf Ebrahimi /*
8921*b7893ccfSSadaf Ebrahimi         Available space is after the end of 1st, as well as before the beginning of 1st (which
8922*b7893ccfSSadaf Ebrahimi         would make it a ring buffer).
8923*b7893ccfSSadaf Ebrahimi */
8924*b7893ccfSSadaf Ebrahimi {
8925*b7893ccfSSadaf Ebrahimi const size_t suballocations1stCount = suballocations1st.size();
8926*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8927*b7893ccfSSadaf Ebrahimi const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8928*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8929*b7893ccfSSadaf Ebrahimi return VMA_MAX(
8930*b7893ccfSSadaf Ebrahimi firstSuballoc.offset,
8931*b7893ccfSSadaf Ebrahimi size - (lastSuballoc.offset + lastSuballoc.size));
8932*b7893ccfSSadaf Ebrahimi }
8933*b7893ccfSSadaf Ebrahimi break;
8934*b7893ccfSSadaf Ebrahimi
8935*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_RING_BUFFER:
8936*b7893ccfSSadaf Ebrahimi /*
8937*b7893ccfSSadaf Ebrahimi Available space is only between end of 2nd and beginning of 1st.
8938*b7893ccfSSadaf Ebrahimi */
8939*b7893ccfSSadaf Ebrahimi {
8940*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8941*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8942*b7893ccfSSadaf Ebrahimi const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8943*b7893ccfSSadaf Ebrahimi return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8944*b7893ccfSSadaf Ebrahimi }
8945*b7893ccfSSadaf Ebrahimi break;
8946*b7893ccfSSadaf Ebrahimi
8947*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_DOUBLE_STACK:
8948*b7893ccfSSadaf Ebrahimi /*
8949*b7893ccfSSadaf Ebrahimi Available space is only between end of 1st and top of 2nd.
8950*b7893ccfSSadaf Ebrahimi */
8951*b7893ccfSSadaf Ebrahimi {
8952*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8953*b7893ccfSSadaf Ebrahimi const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8954*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8955*b7893ccfSSadaf Ebrahimi return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8956*b7893ccfSSadaf Ebrahimi }
8957*b7893ccfSSadaf Ebrahimi break;
8958*b7893ccfSSadaf Ebrahimi
8959*b7893ccfSSadaf Ebrahimi default:
8960*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
8961*b7893ccfSSadaf Ebrahimi return 0;
8962*b7893ccfSSadaf Ebrahimi }
8963*b7893ccfSSadaf Ebrahimi }
8964*b7893ccfSSadaf Ebrahimi
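// Walks the block in address order: first the ring-buffer part of the 2nd vector (if any),
// then the 1st vector, then the upper double-stack part of the 2nd vector, accumulating
// used/unused byte counts and range statistics into outInfo.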
8965*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8966*b7893ccfSSadaf Ebrahimi {
8967*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = GetSize();
8968*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8969*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8970*b7893ccfSSadaf Ebrahimi const size_t suballoc1stCount = suballocations1st.size();
8971*b7893ccfSSadaf Ebrahimi const size_t suballoc2ndCount = suballocations2nd.size();
8972*b7893ccfSSadaf Ebrahimi
8973*b7893ccfSSadaf Ebrahimi outInfo.blockCount = 1;
8974*b7893ccfSSadaf Ebrahimi outInfo.allocationCount = (uint32_t)GetAllocationCount();
8975*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeCount = 0;
8976*b7893ccfSSadaf Ebrahimi outInfo.usedBytes = 0;
8977*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = UINT64_MAX;
8978*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMax = 0;
8979*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = UINT64_MAX;
8980*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = 0;
8981*b7893ccfSSadaf Ebrahimi
8982*b7893ccfSSadaf Ebrahimi VkDeviceSize lastOffset = 0;
8983*b7893ccfSSadaf Ebrahimi
8984*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8985*b7893ccfSSadaf Ebrahimi {
8986*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8987*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = 0;
8988*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace2ndTo1stEnd)
8989*b7893ccfSSadaf Ebrahimi {
8990*b7893ccfSSadaf Ebrahimi             // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8991*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex < suballoc2ndCount &&
8992*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8993*b7893ccfSSadaf Ebrahimi {
8994*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
8995*b7893ccfSSadaf Ebrahimi }
8996*b7893ccfSSadaf Ebrahimi
8997*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
8998*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex < suballoc2ndCount)
8999*b7893ccfSSadaf Ebrahimi {
9000*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9001*b7893ccfSSadaf Ebrahimi
9002*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9003*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9004*b7893ccfSSadaf Ebrahimi {
9005*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9006*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9007*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9008*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9009*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9010*b7893ccfSSadaf Ebrahimi                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9011*b7893ccfSSadaf Ebrahimi }
9012*b7893ccfSSadaf Ebrahimi
9013*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9014*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9015*b7893ccfSSadaf Ebrahimi outInfo.usedBytes += suballoc.size;
9016*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9017*b7893ccfSSadaf Ebrahimi                 outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9018*b7893ccfSSadaf Ebrahimi
9019*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9020*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9021*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9022*b7893ccfSSadaf Ebrahimi }
9023*b7893ccfSSadaf Ebrahimi // We are at the end.
9024*b7893ccfSSadaf Ebrahimi else
9025*b7893ccfSSadaf Ebrahimi {
9026*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9027*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace2ndTo1stEnd)
9028*b7893ccfSSadaf Ebrahimi {
9029*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9030*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9031*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9032*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9033*b7893ccfSSadaf Ebrahimi                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9034*b7893ccfSSadaf Ebrahimi }
9035*b7893ccfSSadaf Ebrahimi
9036*b7893ccfSSadaf Ebrahimi // End of loop.
9037*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace2ndTo1stEnd;
9038*b7893ccfSSadaf Ebrahimi }
9039*b7893ccfSSadaf Ebrahimi }
9040*b7893ccfSSadaf Ebrahimi }
9041*b7893ccfSSadaf Ebrahimi
9042*b7893ccfSSadaf Ebrahimi size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9043*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace1stTo2ndEnd =
9044*b7893ccfSSadaf Ebrahimi m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9045*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace1stTo2ndEnd)
9046*b7893ccfSSadaf Ebrahimi {
9047*b7893ccfSSadaf Ebrahimi         // Find next non-null allocation or move nextAlloc1stIndex to the end.
9048*b7893ccfSSadaf Ebrahimi while(nextAlloc1stIndex < suballoc1stCount &&
9049*b7893ccfSSadaf Ebrahimi suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9050*b7893ccfSSadaf Ebrahimi {
9051*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9052*b7893ccfSSadaf Ebrahimi }
9053*b7893ccfSSadaf Ebrahimi
9054*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9055*b7893ccfSSadaf Ebrahimi if(nextAlloc1stIndex < suballoc1stCount)
9056*b7893ccfSSadaf Ebrahimi {
9057*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9058*b7893ccfSSadaf Ebrahimi
9059*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9060*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9061*b7893ccfSSadaf Ebrahimi {
9062*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9063*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9064*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9065*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9066*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9067*b7893ccfSSadaf Ebrahimi                 outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9068*b7893ccfSSadaf Ebrahimi }
9069*b7893ccfSSadaf Ebrahimi
9070*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9071*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9072*b7893ccfSSadaf Ebrahimi outInfo.usedBytes += suballoc.size;
9073*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9074*b7893ccfSSadaf Ebrahimi             outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9075*b7893ccfSSadaf Ebrahimi
9076*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9077*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9078*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9079*b7893ccfSSadaf Ebrahimi }
9080*b7893ccfSSadaf Ebrahimi // We are at the end.
9081*b7893ccfSSadaf Ebrahimi else
9082*b7893ccfSSadaf Ebrahimi {
9083*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9084*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace1stTo2ndEnd)
9085*b7893ccfSSadaf Ebrahimi {
9086*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9087*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9088*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9089*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9090*b7893ccfSSadaf Ebrahimi                 outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9091*b7893ccfSSadaf Ebrahimi }
9092*b7893ccfSSadaf Ebrahimi
9093*b7893ccfSSadaf Ebrahimi // End of loop.
9094*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace1stTo2ndEnd;
9095*b7893ccfSSadaf Ebrahimi }
9096*b7893ccfSSadaf Ebrahimi }
9097*b7893ccfSSadaf Ebrahimi
9098*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9099*b7893ccfSSadaf Ebrahimi {
9100*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9101*b7893ccfSSadaf Ebrahimi while(lastOffset < size)
9102*b7893ccfSSadaf Ebrahimi {
9103*b7893ccfSSadaf Ebrahimi             // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9104*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex != SIZE_MAX &&
9105*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9106*b7893ccfSSadaf Ebrahimi {
9107*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9108*b7893ccfSSadaf Ebrahimi }
9109*b7893ccfSSadaf Ebrahimi
9110*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9111*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex != SIZE_MAX)
9112*b7893ccfSSadaf Ebrahimi {
9113*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9114*b7893ccfSSadaf Ebrahimi
9115*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9116*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9117*b7893ccfSSadaf Ebrahimi {
9118*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9119*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9120*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9121*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9122*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9123*b7893ccfSSadaf Ebrahimi                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9124*b7893ccfSSadaf Ebrahimi }
9125*b7893ccfSSadaf Ebrahimi
9126*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9127*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9128*b7893ccfSSadaf Ebrahimi outInfo.usedBytes += suballoc.size;
9129*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9130*b7893ccfSSadaf Ebrahimi                 outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9131*b7893ccfSSadaf Ebrahimi
9132*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9133*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9134*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9135*b7893ccfSSadaf Ebrahimi }
9136*b7893ccfSSadaf Ebrahimi // We are at the end.
9137*b7893ccfSSadaf Ebrahimi else
9138*b7893ccfSSadaf Ebrahimi {
9139*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to size.
9140*b7893ccfSSadaf Ebrahimi if(lastOffset < size)
9141*b7893ccfSSadaf Ebrahimi {
9142*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = size - lastOffset;
9143*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
9144*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
9145*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9146*b7893ccfSSadaf Ebrahimi                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9147*b7893ccfSSadaf Ebrahimi }
9148*b7893ccfSSadaf Ebrahimi
9149*b7893ccfSSadaf Ebrahimi // End of loop.
9150*b7893ccfSSadaf Ebrahimi lastOffset = size;
9151*b7893ccfSSadaf Ebrahimi }
9152*b7893ccfSSadaf Ebrahimi }
9153*b7893ccfSSadaf Ebrahimi }
9154*b7893ccfSSadaf Ebrahimi
9155*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes = size - outInfo.usedBytes;
9156*b7893ccfSSadaf Ebrahimi }
9157*b7893ccfSSadaf Ebrahimi
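// Same address-order walk as CalcAllocationStatInfo, but accumulates into VmaPoolStats:
// total size, allocation count, unused range count, total unused size, and the largest unused range.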
9158*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9159*b7893ccfSSadaf Ebrahimi {
9160*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9161*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9162*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = GetSize();
9163*b7893ccfSSadaf Ebrahimi const size_t suballoc1stCount = suballocations1st.size();
9164*b7893ccfSSadaf Ebrahimi const size_t suballoc2ndCount = suballocations2nd.size();
9165*b7893ccfSSadaf Ebrahimi
9166*b7893ccfSSadaf Ebrahimi inoutStats.size += size;
9167*b7893ccfSSadaf Ebrahimi
9168*b7893ccfSSadaf Ebrahimi VkDeviceSize lastOffset = 0;
9169*b7893ccfSSadaf Ebrahimi
9170*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9171*b7893ccfSSadaf Ebrahimi {
9172*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9173*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9174*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace2ndTo1stEnd)
9175*b7893ccfSSadaf Ebrahimi {
9176*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9177*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex < suballoc2ndCount &&
9178*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9179*b7893ccfSSadaf Ebrahimi {
9180*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9181*b7893ccfSSadaf Ebrahimi }
9182*b7893ccfSSadaf Ebrahimi
9183*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9184*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex < suballoc2ndCount)
9185*b7893ccfSSadaf Ebrahimi {
9186*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9187*b7893ccfSSadaf Ebrahimi
9188*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9189*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9190*b7893ccfSSadaf Ebrahimi {
9191*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9192*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9193*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9194*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9195*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9196*b7893ccfSSadaf Ebrahimi }
9197*b7893ccfSSadaf Ebrahimi
9198*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9199*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9200*b7893ccfSSadaf Ebrahimi ++inoutStats.allocationCount;
9201*b7893ccfSSadaf Ebrahimi
9202*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9203*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9204*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9205*b7893ccfSSadaf Ebrahimi }
9206*b7893ccfSSadaf Ebrahimi // We are at the end.
9207*b7893ccfSSadaf Ebrahimi else
9208*b7893ccfSSadaf Ebrahimi {
9209*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace2ndTo1stEnd)
9210*b7893ccfSSadaf Ebrahimi {
9211*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9212*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9213*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9214*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9215*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9216*b7893ccfSSadaf Ebrahimi }
9217*b7893ccfSSadaf Ebrahimi
9218*b7893ccfSSadaf Ebrahimi // End of loop.
9219*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace2ndTo1stEnd;
9220*b7893ccfSSadaf Ebrahimi }
9221*b7893ccfSSadaf Ebrahimi }
9222*b7893ccfSSadaf Ebrahimi }
9223*b7893ccfSSadaf Ebrahimi
9224*b7893ccfSSadaf Ebrahimi size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9225*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace1stTo2ndEnd =
9226*b7893ccfSSadaf Ebrahimi m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9227*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace1stTo2ndEnd)
9228*b7893ccfSSadaf Ebrahimi {
9229*b7893ccfSSadaf Ebrahimi         // Find next non-null allocation or move nextAlloc1stIndex to the end.
9230*b7893ccfSSadaf Ebrahimi while(nextAlloc1stIndex < suballoc1stCount &&
9231*b7893ccfSSadaf Ebrahimi suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9232*b7893ccfSSadaf Ebrahimi {
9233*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9234*b7893ccfSSadaf Ebrahimi }
9235*b7893ccfSSadaf Ebrahimi
9236*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9237*b7893ccfSSadaf Ebrahimi if(nextAlloc1stIndex < suballoc1stCount)
9238*b7893ccfSSadaf Ebrahimi {
9239*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9240*b7893ccfSSadaf Ebrahimi
9241*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9242*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9243*b7893ccfSSadaf Ebrahimi {
9244*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9245*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9246*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9247*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9248*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9249*b7893ccfSSadaf Ebrahimi }
9250*b7893ccfSSadaf Ebrahimi
9251*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9252*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9253*b7893ccfSSadaf Ebrahimi ++inoutStats.allocationCount;
9254*b7893ccfSSadaf Ebrahimi
9255*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9256*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9257*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9258*b7893ccfSSadaf Ebrahimi }
9259*b7893ccfSSadaf Ebrahimi // We are at the end.
9260*b7893ccfSSadaf Ebrahimi else
9261*b7893ccfSSadaf Ebrahimi {
9262*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace1stTo2ndEnd)
9263*b7893ccfSSadaf Ebrahimi {
9264*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9265*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9266*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9267*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9268*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9269*b7893ccfSSadaf Ebrahimi }
9270*b7893ccfSSadaf Ebrahimi
9271*b7893ccfSSadaf Ebrahimi // End of loop.
9272*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace1stTo2ndEnd;
9273*b7893ccfSSadaf Ebrahimi }
9274*b7893ccfSSadaf Ebrahimi }
9275*b7893ccfSSadaf Ebrahimi
9276*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9277*b7893ccfSSadaf Ebrahimi {
9278*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9279*b7893ccfSSadaf Ebrahimi while(lastOffset < size)
9280*b7893ccfSSadaf Ebrahimi {
9281*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9282*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex != SIZE_MAX &&
9283*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9284*b7893ccfSSadaf Ebrahimi {
9285*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9286*b7893ccfSSadaf Ebrahimi }
9287*b7893ccfSSadaf Ebrahimi
9288*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9289*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex != SIZE_MAX)
9290*b7893ccfSSadaf Ebrahimi {
9291*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9292*b7893ccfSSadaf Ebrahimi
9293*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9294*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9295*b7893ccfSSadaf Ebrahimi {
9296*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9297*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9298*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9299*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9300*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9301*b7893ccfSSadaf Ebrahimi }
9302*b7893ccfSSadaf Ebrahimi
9303*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9304*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9305*b7893ccfSSadaf Ebrahimi ++inoutStats.allocationCount;
9306*b7893ccfSSadaf Ebrahimi
9307*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9308*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9309*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9310*b7893ccfSSadaf Ebrahimi }
9311*b7893ccfSSadaf Ebrahimi // We are at the end.
9312*b7893ccfSSadaf Ebrahimi else
9313*b7893ccfSSadaf Ebrahimi {
9314*b7893ccfSSadaf Ebrahimi if(lastOffset < size)
9315*b7893ccfSSadaf Ebrahimi {
9316*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to size.
9317*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = size - lastOffset;
9318*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += unusedRangeSize;
9319*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
9320*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9321*b7893ccfSSadaf Ebrahimi }
9322*b7893ccfSSadaf Ebrahimi
9323*b7893ccfSSadaf Ebrahimi // End of loop.
9324*b7893ccfSSadaf Ebrahimi lastOffset = size;
9325*b7893ccfSSadaf Ebrahimi }
9326*b7893ccfSSadaf Ebrahimi }
9327*b7893ccfSSadaf Ebrahimi }
9328*b7893ccfSSadaf Ebrahimi }
9329*b7893ccfSSadaf Ebrahimi
9330*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
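// PrintDetailedMap() walks the block in the same order as the statistics code
// above: the 2nd vector while it acts as a ring buffer (allocations wrapped to
// the start of the block), then the 1st vector, then the 2nd vector while it
// acts as the upper stack. It makes two passes: the first only counts
// allocations and unused ranges so PrintDetailedMap_Begin() can emit the totals
// up front, the second emits one JSON entry per allocation or unused range.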
9331*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9332*b7893ccfSSadaf Ebrahimi {
9333*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = GetSize();
9334*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9335*b7893ccfSSadaf Ebrahimi const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9336*b7893ccfSSadaf Ebrahimi const size_t suballoc1stCount = suballocations1st.size();
9337*b7893ccfSSadaf Ebrahimi const size_t suballoc2ndCount = suballocations2nd.size();
9338*b7893ccfSSadaf Ebrahimi
9339*b7893ccfSSadaf Ebrahimi // FIRST PASS
9340*b7893ccfSSadaf Ebrahimi
9341*b7893ccfSSadaf Ebrahimi size_t unusedRangeCount = 0;
9342*b7893ccfSSadaf Ebrahimi VkDeviceSize usedBytes = 0;
9343*b7893ccfSSadaf Ebrahimi
9344*b7893ccfSSadaf Ebrahimi VkDeviceSize lastOffset = 0;
9345*b7893ccfSSadaf Ebrahimi
9346*b7893ccfSSadaf Ebrahimi size_t alloc2ndCount = 0;
9347*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9348*b7893ccfSSadaf Ebrahimi {
9349*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9350*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = 0;
9351*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace2ndTo1stEnd)
9352*b7893ccfSSadaf Ebrahimi {
9353*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9354*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex < suballoc2ndCount &&
9355*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9356*b7893ccfSSadaf Ebrahimi {
9357*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9358*b7893ccfSSadaf Ebrahimi }
9359*b7893ccfSSadaf Ebrahimi
9360*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9361*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex < suballoc2ndCount)
9362*b7893ccfSSadaf Ebrahimi {
9363*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9364*b7893ccfSSadaf Ebrahimi
9365*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9366*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9367*b7893ccfSSadaf Ebrahimi {
9368*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9369*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9370*b7893ccfSSadaf Ebrahimi }
9371*b7893ccfSSadaf Ebrahimi
9372*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9373*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9374*b7893ccfSSadaf Ebrahimi ++alloc2ndCount;
9375*b7893ccfSSadaf Ebrahimi usedBytes += suballoc.size;
9376*b7893ccfSSadaf Ebrahimi
9377*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9378*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9379*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9380*b7893ccfSSadaf Ebrahimi }
9381*b7893ccfSSadaf Ebrahimi // We are at the end.
9382*b7893ccfSSadaf Ebrahimi else
9383*b7893ccfSSadaf Ebrahimi {
9384*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace2ndTo1stEnd)
9385*b7893ccfSSadaf Ebrahimi {
9386*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9387*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9388*b7893ccfSSadaf Ebrahimi }
9389*b7893ccfSSadaf Ebrahimi
9390*b7893ccfSSadaf Ebrahimi // End of loop.
9391*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace2ndTo1stEnd;
9392*b7893ccfSSadaf Ebrahimi }
9393*b7893ccfSSadaf Ebrahimi }
9394*b7893ccfSSadaf Ebrahimi }
9395*b7893ccfSSadaf Ebrahimi
9396*b7893ccfSSadaf Ebrahimi size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9397*b7893ccfSSadaf Ebrahimi size_t alloc1stCount = 0;
9398*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace1stTo2ndEnd =
9399*b7893ccfSSadaf Ebrahimi m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9400*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace1stTo2ndEnd)
9401*b7893ccfSSadaf Ebrahimi {
9402*b7893ccfSSadaf Ebrahimi         // Find next non-null allocation or move nextAlloc1stIndex to the end.
9403*b7893ccfSSadaf Ebrahimi while(nextAlloc1stIndex < suballoc1stCount &&
9404*b7893ccfSSadaf Ebrahimi suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9405*b7893ccfSSadaf Ebrahimi {
9406*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9407*b7893ccfSSadaf Ebrahimi }
9408*b7893ccfSSadaf Ebrahimi
9409*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9410*b7893ccfSSadaf Ebrahimi if(nextAlloc1stIndex < suballoc1stCount)
9411*b7893ccfSSadaf Ebrahimi {
9412*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9413*b7893ccfSSadaf Ebrahimi
9414*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9415*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9416*b7893ccfSSadaf Ebrahimi {
9417*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9418*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9419*b7893ccfSSadaf Ebrahimi }
9420*b7893ccfSSadaf Ebrahimi
9421*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9422*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9423*b7893ccfSSadaf Ebrahimi ++alloc1stCount;
9424*b7893ccfSSadaf Ebrahimi usedBytes += suballoc.size;
9425*b7893ccfSSadaf Ebrahimi
9426*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9427*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9428*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9429*b7893ccfSSadaf Ebrahimi }
9430*b7893ccfSSadaf Ebrahimi // We are at the end.
9431*b7893ccfSSadaf Ebrahimi else
9432*b7893ccfSSadaf Ebrahimi {
9433*b7893ccfSSadaf Ebrahimi if(lastOffset < size)
9434*b7893ccfSSadaf Ebrahimi {
9435*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9436*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9437*b7893ccfSSadaf Ebrahimi }
9438*b7893ccfSSadaf Ebrahimi
9439*b7893ccfSSadaf Ebrahimi // End of loop.
9440*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace1stTo2ndEnd;
9441*b7893ccfSSadaf Ebrahimi }
9442*b7893ccfSSadaf Ebrahimi }
9443*b7893ccfSSadaf Ebrahimi
9444*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9445*b7893ccfSSadaf Ebrahimi {
9446*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9447*b7893ccfSSadaf Ebrahimi while(lastOffset < size)
9448*b7893ccfSSadaf Ebrahimi {
9449*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9450*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex != SIZE_MAX &&
9451*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9452*b7893ccfSSadaf Ebrahimi {
9453*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9454*b7893ccfSSadaf Ebrahimi }
9455*b7893ccfSSadaf Ebrahimi
9456*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9457*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex != SIZE_MAX)
9458*b7893ccfSSadaf Ebrahimi {
9459*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9460*b7893ccfSSadaf Ebrahimi
9461*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9462*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9463*b7893ccfSSadaf Ebrahimi {
9464*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9465*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9466*b7893ccfSSadaf Ebrahimi }
9467*b7893ccfSSadaf Ebrahimi
9468*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9469*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9470*b7893ccfSSadaf Ebrahimi ++alloc2ndCount;
9471*b7893ccfSSadaf Ebrahimi usedBytes += suballoc.size;
9472*b7893ccfSSadaf Ebrahimi
9473*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9474*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9475*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9476*b7893ccfSSadaf Ebrahimi }
9477*b7893ccfSSadaf Ebrahimi // We are at the end.
9478*b7893ccfSSadaf Ebrahimi else
9479*b7893ccfSSadaf Ebrahimi {
9480*b7893ccfSSadaf Ebrahimi if(lastOffset < size)
9481*b7893ccfSSadaf Ebrahimi {
9482*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to size.
9483*b7893ccfSSadaf Ebrahimi ++unusedRangeCount;
9484*b7893ccfSSadaf Ebrahimi }
9485*b7893ccfSSadaf Ebrahimi
9486*b7893ccfSSadaf Ebrahimi // End of loop.
9487*b7893ccfSSadaf Ebrahimi lastOffset = size;
9488*b7893ccfSSadaf Ebrahimi }
9489*b7893ccfSSadaf Ebrahimi }
9490*b7893ccfSSadaf Ebrahimi }
9491*b7893ccfSSadaf Ebrahimi
9492*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedBytes = size - usedBytes;
9493*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9494*b7893ccfSSadaf Ebrahimi
9495*b7893ccfSSadaf Ebrahimi // SECOND PASS
9496*b7893ccfSSadaf Ebrahimi lastOffset = 0;
9497*b7893ccfSSadaf Ebrahimi
9498*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9499*b7893ccfSSadaf Ebrahimi {
9500*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9501*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = 0;
9502*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace2ndTo1stEnd)
9503*b7893ccfSSadaf Ebrahimi {
9504*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9505*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex < suballoc2ndCount &&
9506*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9507*b7893ccfSSadaf Ebrahimi {
9508*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9509*b7893ccfSSadaf Ebrahimi }
9510*b7893ccfSSadaf Ebrahimi
9511*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9512*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex < suballoc2ndCount)
9513*b7893ccfSSadaf Ebrahimi {
9514*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9515*b7893ccfSSadaf Ebrahimi
9516*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9517*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9518*b7893ccfSSadaf Ebrahimi {
9519*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9520*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9521*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9522*b7893ccfSSadaf Ebrahimi }
9523*b7893ccfSSadaf Ebrahimi
9524*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9525*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9526*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9527*b7893ccfSSadaf Ebrahimi
9528*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9529*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9530*b7893ccfSSadaf Ebrahimi ++nextAlloc2ndIndex;
9531*b7893ccfSSadaf Ebrahimi }
9532*b7893ccfSSadaf Ebrahimi // We are at the end.
9533*b7893ccfSSadaf Ebrahimi else
9534*b7893ccfSSadaf Ebrahimi {
9535*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace2ndTo1stEnd)
9536*b7893ccfSSadaf Ebrahimi {
9537*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9538*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9539*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9540*b7893ccfSSadaf Ebrahimi }
9541*b7893ccfSSadaf Ebrahimi
9542*b7893ccfSSadaf Ebrahimi // End of loop.
9543*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace2ndTo1stEnd;
9544*b7893ccfSSadaf Ebrahimi }
9545*b7893ccfSSadaf Ebrahimi }
9546*b7893ccfSSadaf Ebrahimi }
9547*b7893ccfSSadaf Ebrahimi
9548*b7893ccfSSadaf Ebrahimi nextAlloc1stIndex = m_1stNullItemsBeginCount;
9549*b7893ccfSSadaf Ebrahimi while(lastOffset < freeSpace1stTo2ndEnd)
9550*b7893ccfSSadaf Ebrahimi {
9551*b7893ccfSSadaf Ebrahimi         // Find next non-null allocation or move nextAlloc1stIndex to the end.
9552*b7893ccfSSadaf Ebrahimi while(nextAlloc1stIndex < suballoc1stCount &&
9553*b7893ccfSSadaf Ebrahimi suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9554*b7893ccfSSadaf Ebrahimi {
9555*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9556*b7893ccfSSadaf Ebrahimi }
9557*b7893ccfSSadaf Ebrahimi
9558*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9559*b7893ccfSSadaf Ebrahimi if(nextAlloc1stIndex < suballoc1stCount)
9560*b7893ccfSSadaf Ebrahimi {
9561*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9562*b7893ccfSSadaf Ebrahimi
9563*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9564*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9565*b7893ccfSSadaf Ebrahimi {
9566*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9567*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9568*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9569*b7893ccfSSadaf Ebrahimi }
9570*b7893ccfSSadaf Ebrahimi
9571*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9572*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9573*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9574*b7893ccfSSadaf Ebrahimi
9575*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9576*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9577*b7893ccfSSadaf Ebrahimi ++nextAlloc1stIndex;
9578*b7893ccfSSadaf Ebrahimi }
9579*b7893ccfSSadaf Ebrahimi // We are at the end.
9580*b7893ccfSSadaf Ebrahimi else
9581*b7893ccfSSadaf Ebrahimi {
9582*b7893ccfSSadaf Ebrahimi if(lastOffset < freeSpace1stTo2ndEnd)
9583*b7893ccfSSadaf Ebrahimi {
9584*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9585*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9586*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9587*b7893ccfSSadaf Ebrahimi }
9588*b7893ccfSSadaf Ebrahimi
9589*b7893ccfSSadaf Ebrahimi // End of loop.
9590*b7893ccfSSadaf Ebrahimi lastOffset = freeSpace1stTo2ndEnd;
9591*b7893ccfSSadaf Ebrahimi }
9592*b7893ccfSSadaf Ebrahimi }
9593*b7893ccfSSadaf Ebrahimi
9594*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9595*b7893ccfSSadaf Ebrahimi {
9596*b7893ccfSSadaf Ebrahimi size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9597*b7893ccfSSadaf Ebrahimi while(lastOffset < size)
9598*b7893ccfSSadaf Ebrahimi {
9599*b7893ccfSSadaf Ebrahimi // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9600*b7893ccfSSadaf Ebrahimi while(nextAlloc2ndIndex != SIZE_MAX &&
9601*b7893ccfSSadaf Ebrahimi suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9602*b7893ccfSSadaf Ebrahimi {
9603*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9604*b7893ccfSSadaf Ebrahimi }
9605*b7893ccfSSadaf Ebrahimi
9606*b7893ccfSSadaf Ebrahimi // Found non-null allocation.
9607*b7893ccfSSadaf Ebrahimi if(nextAlloc2ndIndex != SIZE_MAX)
9608*b7893ccfSSadaf Ebrahimi {
9609*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9610*b7893ccfSSadaf Ebrahimi
9611*b7893ccfSSadaf Ebrahimi // 1. Process free space before this allocation.
9612*b7893ccfSSadaf Ebrahimi if(lastOffset < suballoc.offset)
9613*b7893ccfSSadaf Ebrahimi {
9614*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to suballoc.offset.
9615*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9616*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9617*b7893ccfSSadaf Ebrahimi }
9618*b7893ccfSSadaf Ebrahimi
9619*b7893ccfSSadaf Ebrahimi // 2. Process this allocation.
9620*b7893ccfSSadaf Ebrahimi // There is allocation with suballoc.offset, suballoc.size.
9621*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9622*b7893ccfSSadaf Ebrahimi
9623*b7893ccfSSadaf Ebrahimi // 3. Prepare for next iteration.
9624*b7893ccfSSadaf Ebrahimi lastOffset = suballoc.offset + suballoc.size;
9625*b7893ccfSSadaf Ebrahimi --nextAlloc2ndIndex;
9626*b7893ccfSSadaf Ebrahimi }
9627*b7893ccfSSadaf Ebrahimi // We are at the end.
9628*b7893ccfSSadaf Ebrahimi else
9629*b7893ccfSSadaf Ebrahimi {
9630*b7893ccfSSadaf Ebrahimi if(lastOffset < size)
9631*b7893ccfSSadaf Ebrahimi {
9632*b7893ccfSSadaf Ebrahimi // There is free space from lastOffset to size.
9633*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = size - lastOffset;
9634*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9635*b7893ccfSSadaf Ebrahimi }
9636*b7893ccfSSadaf Ebrahimi
9637*b7893ccfSSadaf Ebrahimi // End of loop.
9638*b7893ccfSSadaf Ebrahimi lastOffset = size;
9639*b7893ccfSSadaf Ebrahimi }
9640*b7893ccfSSadaf Ebrahimi }
9641*b7893ccfSSadaf Ebrahimi }
9642*b7893ccfSSadaf Ebrahimi
9643*b7893ccfSSadaf Ebrahimi PrintDetailedMap_End(json);
9644*b7893ccfSSadaf Ebrahimi }
9645*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
9646*b7893ccfSSadaf Ebrahimi
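// CreateAllocationRequest() decides where a new suballocation could be placed
// without moving existing ones: at the end of the 1st vector, at the top of the
// block when the caller asks for an upper address (turning the block into a
// double stack), or wrapped around before the beginning of the 1st vector
// (turning the block into a 2-part ring buffer, possibly making old allocations
// lost). Illustrative sketch of how a caller reaches this code through the
// public API; `allocator`, `memTypeIndex` and `bufCreateInfo` are assumed to
// already exist and the values shown are examples only:
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex;
//     poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // selects VmaBlockMetadata_Linear
//     poolInfo.blockSize = 64ull * 1024 * 1024;              // one fixed-size block
//     poolInfo.maxBlockCount = 1;
//     VmaPool pool;
//     vmaCreatePool(allocator, &poolInfo, &pool);
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.pool = pool;
//     allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // optional: allocate from the top (double stack)
//
//     VkBuffer buf;
//     VmaAllocation alloc;
//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocInfo, &buf, &alloc, nullptr);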
9647*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9648*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
9649*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
9650*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
9651*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
9652*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
9653*b7893ccfSSadaf Ebrahimi bool upperAddress,
9654*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
9655*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
9656*b7893ccfSSadaf Ebrahimi uint32_t strategy,
9657*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
9658*b7893ccfSSadaf Ebrahimi {
9659*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocSize > 0);
9660*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9661*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationRequest != VMA_NULL);
9662*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(Validate());
9663*b7893ccfSSadaf Ebrahimi
9664*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = GetSize();
9665*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9666*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9667*b7893ccfSSadaf Ebrahimi
9668*b7893ccfSSadaf Ebrahimi if(upperAddress)
9669*b7893ccfSSadaf Ebrahimi {
9670*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9671*b7893ccfSSadaf Ebrahimi {
9672*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9673*b7893ccfSSadaf Ebrahimi return false;
9674*b7893ccfSSadaf Ebrahimi }
9675*b7893ccfSSadaf Ebrahimi
9676*b7893ccfSSadaf Ebrahimi // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9677*b7893ccfSSadaf Ebrahimi if(allocSize > size)
9678*b7893ccfSSadaf Ebrahimi {
9679*b7893ccfSSadaf Ebrahimi return false;
9680*b7893ccfSSadaf Ebrahimi }
9681*b7893ccfSSadaf Ebrahimi VkDeviceSize resultBaseOffset = size - allocSize;
9682*b7893ccfSSadaf Ebrahimi if(!suballocations2nd.empty())
9683*b7893ccfSSadaf Ebrahimi {
9684*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9685*b7893ccfSSadaf Ebrahimi resultBaseOffset = lastSuballoc.offset - allocSize;
9686*b7893ccfSSadaf Ebrahimi if(allocSize > lastSuballoc.offset)
9687*b7893ccfSSadaf Ebrahimi {
9688*b7893ccfSSadaf Ebrahimi return false;
9689*b7893ccfSSadaf Ebrahimi }
9690*b7893ccfSSadaf Ebrahimi }
9691*b7893ccfSSadaf Ebrahimi
9692*b7893ccfSSadaf Ebrahimi // Start from offset equal to end of free space.
9693*b7893ccfSSadaf Ebrahimi VkDeviceSize resultOffset = resultBaseOffset;
9694*b7893ccfSSadaf Ebrahimi
9695*b7893ccfSSadaf Ebrahimi // Apply VMA_DEBUG_MARGIN at the end.
9696*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN > 0)
9697*b7893ccfSSadaf Ebrahimi {
9698*b7893ccfSSadaf Ebrahimi if(resultOffset < VMA_DEBUG_MARGIN)
9699*b7893ccfSSadaf Ebrahimi {
9700*b7893ccfSSadaf Ebrahimi return false;
9701*b7893ccfSSadaf Ebrahimi }
9702*b7893ccfSSadaf Ebrahimi resultOffset -= VMA_DEBUG_MARGIN;
9703*b7893ccfSSadaf Ebrahimi }
9704*b7893ccfSSadaf Ebrahimi
9705*b7893ccfSSadaf Ebrahimi // Apply alignment.
9706*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9707*b7893ccfSSadaf Ebrahimi
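        // bufferImageGranularity is the VkPhysicalDeviceLimits value of the same name:
        // linear and optimally-tiled resources placed within the same "page" of that
        // size may interfere, so when a neighboring suballocation has a conflicting
        // type the offset is pushed out to this coarser alignment.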
9708*b7893ccfSSadaf Ebrahimi // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9709*b7893ccfSSadaf Ebrahimi // Make bigger alignment if necessary.
9710*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9711*b7893ccfSSadaf Ebrahimi {
9712*b7893ccfSSadaf Ebrahimi bool bufferImageGranularityConflict = false;
9713*b7893ccfSSadaf Ebrahimi for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9714*b7893ccfSSadaf Ebrahimi {
9715*b7893ccfSSadaf Ebrahimi const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9716*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9717*b7893ccfSSadaf Ebrahimi {
9718*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9719*b7893ccfSSadaf Ebrahimi {
9720*b7893ccfSSadaf Ebrahimi bufferImageGranularityConflict = true;
9721*b7893ccfSSadaf Ebrahimi break;
9722*b7893ccfSSadaf Ebrahimi }
9723*b7893ccfSSadaf Ebrahimi }
9724*b7893ccfSSadaf Ebrahimi else
9725*b7893ccfSSadaf Ebrahimi // Already on previous page.
9726*b7893ccfSSadaf Ebrahimi break;
9727*b7893ccfSSadaf Ebrahimi }
9728*b7893ccfSSadaf Ebrahimi if(bufferImageGranularityConflict)
9729*b7893ccfSSadaf Ebrahimi {
9730*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9731*b7893ccfSSadaf Ebrahimi }
9732*b7893ccfSSadaf Ebrahimi }
9733*b7893ccfSSadaf Ebrahimi
9734*b7893ccfSSadaf Ebrahimi // There is enough free space.
9735*b7893ccfSSadaf Ebrahimi const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9736*b7893ccfSSadaf Ebrahimi suballocations1st.back().offset + suballocations1st.back().size :
9737*b7893ccfSSadaf Ebrahimi 0;
9738*b7893ccfSSadaf Ebrahimi if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9739*b7893ccfSSadaf Ebrahimi {
9740*b7893ccfSSadaf Ebrahimi // Check previous suballocations for BufferImageGranularity conflicts.
9741*b7893ccfSSadaf Ebrahimi // If conflict exists, allocation cannot be made here.
9742*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
9743*b7893ccfSSadaf Ebrahimi {
9744*b7893ccfSSadaf Ebrahimi for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9745*b7893ccfSSadaf Ebrahimi {
9746*b7893ccfSSadaf Ebrahimi const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9747*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9748*b7893ccfSSadaf Ebrahimi {
9749*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9750*b7893ccfSSadaf Ebrahimi {
9751*b7893ccfSSadaf Ebrahimi return false;
9752*b7893ccfSSadaf Ebrahimi }
9753*b7893ccfSSadaf Ebrahimi }
9754*b7893ccfSSadaf Ebrahimi else
9755*b7893ccfSSadaf Ebrahimi {
9756*b7893ccfSSadaf Ebrahimi // Already on next page.
9757*b7893ccfSSadaf Ebrahimi break;
9758*b7893ccfSSadaf Ebrahimi }
9759*b7893ccfSSadaf Ebrahimi }
9760*b7893ccfSSadaf Ebrahimi }
9761*b7893ccfSSadaf Ebrahimi
9762*b7893ccfSSadaf Ebrahimi // All tests passed: Success.
9763*b7893ccfSSadaf Ebrahimi pAllocationRequest->offset = resultOffset;
9764*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9765*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize = 0;
9766*b7893ccfSSadaf Ebrahimi // pAllocationRequest->item unused.
9767*b7893ccfSSadaf Ebrahimi pAllocationRequest->itemsToMakeLostCount = 0;
9768*b7893ccfSSadaf Ebrahimi return true;
9769*b7893ccfSSadaf Ebrahimi }
9770*b7893ccfSSadaf Ebrahimi }
9771*b7893ccfSSadaf Ebrahimi else // !upperAddress
9772*b7893ccfSSadaf Ebrahimi {
9773*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9774*b7893ccfSSadaf Ebrahimi {
9775*b7893ccfSSadaf Ebrahimi // Try to allocate at the end of 1st vector.
9776*b7893ccfSSadaf Ebrahimi
9777*b7893ccfSSadaf Ebrahimi VkDeviceSize resultBaseOffset = 0;
9778*b7893ccfSSadaf Ebrahimi if(!suballocations1st.empty())
9779*b7893ccfSSadaf Ebrahimi {
9780*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc = suballocations1st.back();
9781*b7893ccfSSadaf Ebrahimi resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9782*b7893ccfSSadaf Ebrahimi }
9783*b7893ccfSSadaf Ebrahimi
9784*b7893ccfSSadaf Ebrahimi // Start from offset equal to beginning of free space.
9785*b7893ccfSSadaf Ebrahimi VkDeviceSize resultOffset = resultBaseOffset;
9786*b7893ccfSSadaf Ebrahimi
9787*b7893ccfSSadaf Ebrahimi // Apply VMA_DEBUG_MARGIN at the beginning.
9788*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN > 0)
9789*b7893ccfSSadaf Ebrahimi {
9790*b7893ccfSSadaf Ebrahimi resultOffset += VMA_DEBUG_MARGIN;
9791*b7893ccfSSadaf Ebrahimi }
9792*b7893ccfSSadaf Ebrahimi
9793*b7893ccfSSadaf Ebrahimi // Apply alignment.
9794*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9795*b7893ccfSSadaf Ebrahimi
9796*b7893ccfSSadaf Ebrahimi // Check previous suballocations for BufferImageGranularity conflicts.
9797*b7893ccfSSadaf Ebrahimi // Make bigger alignment if necessary.
9798*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1 && !suballocations1st.empty())
9799*b7893ccfSSadaf Ebrahimi {
9800*b7893ccfSSadaf Ebrahimi bool bufferImageGranularityConflict = false;
9801*b7893ccfSSadaf Ebrahimi for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9802*b7893ccfSSadaf Ebrahimi {
9803*b7893ccfSSadaf Ebrahimi const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9804*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9805*b7893ccfSSadaf Ebrahimi {
9806*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9807*b7893ccfSSadaf Ebrahimi {
9808*b7893ccfSSadaf Ebrahimi bufferImageGranularityConflict = true;
9809*b7893ccfSSadaf Ebrahimi break;
9810*b7893ccfSSadaf Ebrahimi }
9811*b7893ccfSSadaf Ebrahimi }
9812*b7893ccfSSadaf Ebrahimi else
9813*b7893ccfSSadaf Ebrahimi // Already on previous page.
9814*b7893ccfSSadaf Ebrahimi break;
9815*b7893ccfSSadaf Ebrahimi }
9816*b7893ccfSSadaf Ebrahimi if(bufferImageGranularityConflict)
9817*b7893ccfSSadaf Ebrahimi {
9818*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9819*b7893ccfSSadaf Ebrahimi }
9820*b7893ccfSSadaf Ebrahimi }
9821*b7893ccfSSadaf Ebrahimi
9822*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9823*b7893ccfSSadaf Ebrahimi suballocations2nd.back().offset : size;
9824*b7893ccfSSadaf Ebrahimi
9825*b7893ccfSSadaf Ebrahimi // There is enough free space at the end after alignment.
9826*b7893ccfSSadaf Ebrahimi if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9827*b7893ccfSSadaf Ebrahimi {
9828*b7893ccfSSadaf Ebrahimi // Check next suballocations for BufferImageGranularity conflicts.
9829*b7893ccfSSadaf Ebrahimi // If conflict exists, allocation cannot be made here.
9830*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9831*b7893ccfSSadaf Ebrahimi {
9832*b7893ccfSSadaf Ebrahimi for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9833*b7893ccfSSadaf Ebrahimi {
9834*b7893ccfSSadaf Ebrahimi const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9835*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9836*b7893ccfSSadaf Ebrahimi {
9837*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9838*b7893ccfSSadaf Ebrahimi {
9839*b7893ccfSSadaf Ebrahimi return false;
9840*b7893ccfSSadaf Ebrahimi }
9841*b7893ccfSSadaf Ebrahimi }
9842*b7893ccfSSadaf Ebrahimi else
9843*b7893ccfSSadaf Ebrahimi {
9844*b7893ccfSSadaf Ebrahimi // Already on previous page.
9845*b7893ccfSSadaf Ebrahimi break;
9846*b7893ccfSSadaf Ebrahimi }
9847*b7893ccfSSadaf Ebrahimi }
9848*b7893ccfSSadaf Ebrahimi }
9849*b7893ccfSSadaf Ebrahimi
9850*b7893ccfSSadaf Ebrahimi // All tests passed: Success.
9851*b7893ccfSSadaf Ebrahimi pAllocationRequest->offset = resultOffset;
9852*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9853*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize = 0;
9854*b7893ccfSSadaf Ebrahimi // pAllocationRequest->item unused.
9855*b7893ccfSSadaf Ebrahimi pAllocationRequest->itemsToMakeLostCount = 0;
9856*b7893ccfSSadaf Ebrahimi return true;
9857*b7893ccfSSadaf Ebrahimi }
9858*b7893ccfSSadaf Ebrahimi }
9859*b7893ccfSSadaf Ebrahimi
9860*b7893ccfSSadaf Ebrahimi // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9861*b7893ccfSSadaf Ebrahimi // beginning of 1st vector as the end of free space.
9862*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9863*b7893ccfSSadaf Ebrahimi {
9864*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!suballocations1st.empty());
9865*b7893ccfSSadaf Ebrahimi
9866*b7893ccfSSadaf Ebrahimi VkDeviceSize resultBaseOffset = 0;
9867*b7893ccfSSadaf Ebrahimi if(!suballocations2nd.empty())
9868*b7893ccfSSadaf Ebrahimi {
9869*b7893ccfSSadaf Ebrahimi const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9870*b7893ccfSSadaf Ebrahimi resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9871*b7893ccfSSadaf Ebrahimi }
9872*b7893ccfSSadaf Ebrahimi
9873*b7893ccfSSadaf Ebrahimi // Start from offset equal to beginning of free space.
9874*b7893ccfSSadaf Ebrahimi VkDeviceSize resultOffset = resultBaseOffset;
9875*b7893ccfSSadaf Ebrahimi
9876*b7893ccfSSadaf Ebrahimi // Apply VMA_DEBUG_MARGIN at the beginning.
9877*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN > 0)
9878*b7893ccfSSadaf Ebrahimi {
9879*b7893ccfSSadaf Ebrahimi resultOffset += VMA_DEBUG_MARGIN;
9880*b7893ccfSSadaf Ebrahimi }
9881*b7893ccfSSadaf Ebrahimi
9882*b7893ccfSSadaf Ebrahimi // Apply alignment.
9883*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9884*b7893ccfSSadaf Ebrahimi
9885*b7893ccfSSadaf Ebrahimi // Check previous suballocations for BufferImageGranularity conflicts.
9886*b7893ccfSSadaf Ebrahimi // Make bigger alignment if necessary.
9887*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9888*b7893ccfSSadaf Ebrahimi {
9889*b7893ccfSSadaf Ebrahimi bool bufferImageGranularityConflict = false;
9890*b7893ccfSSadaf Ebrahimi for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9891*b7893ccfSSadaf Ebrahimi {
9892*b7893ccfSSadaf Ebrahimi const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9893*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9894*b7893ccfSSadaf Ebrahimi {
9895*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9896*b7893ccfSSadaf Ebrahimi {
9897*b7893ccfSSadaf Ebrahimi bufferImageGranularityConflict = true;
9898*b7893ccfSSadaf Ebrahimi break;
9899*b7893ccfSSadaf Ebrahimi }
9900*b7893ccfSSadaf Ebrahimi }
9901*b7893ccfSSadaf Ebrahimi else
9902*b7893ccfSSadaf Ebrahimi // Already on previous page.
9903*b7893ccfSSadaf Ebrahimi break;
9904*b7893ccfSSadaf Ebrahimi }
9905*b7893ccfSSadaf Ebrahimi if(bufferImageGranularityConflict)
9906*b7893ccfSSadaf Ebrahimi {
9907*b7893ccfSSadaf Ebrahimi resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9908*b7893ccfSSadaf Ebrahimi }
9909*b7893ccfSSadaf Ebrahimi }
9910*b7893ccfSSadaf Ebrahimi
9911*b7893ccfSSadaf Ebrahimi pAllocationRequest->itemsToMakeLostCount = 0;
9912*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize = 0;
9913*b7893ccfSSadaf Ebrahimi size_t index1st = m_1stNullItemsBeginCount;
9914*b7893ccfSSadaf Ebrahimi
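            // "Lost" allocations: when the ring buffer wraps around and would collide
            // with the beginning of the 1st vector, allocations that can become lost
            // (see CanBecomeLost()) and have not been used for more than
            // frameInUseCount frames may be sacrificed. This block only counts them
            // and sums their sizes; MakeRequestedAllocationsLost() below actually
            // releases them.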
9915*b7893ccfSSadaf Ebrahimi if(canMakeOtherLost)
9916*b7893ccfSSadaf Ebrahimi {
9917*b7893ccfSSadaf Ebrahimi while(index1st < suballocations1st.size() &&
9918*b7893ccfSSadaf Ebrahimi resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9919*b7893ccfSSadaf Ebrahimi {
9920*b7893ccfSSadaf Ebrahimi // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9921*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[index1st];
9922*b7893ccfSSadaf Ebrahimi if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9923*b7893ccfSSadaf Ebrahimi {
9924*b7893ccfSSadaf Ebrahimi // No problem.
9925*b7893ccfSSadaf Ebrahimi }
9926*b7893ccfSSadaf Ebrahimi else
9927*b7893ccfSSadaf Ebrahimi {
9928*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9929*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation->CanBecomeLost() &&
9930*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9931*b7893ccfSSadaf Ebrahimi {
9932*b7893ccfSSadaf Ebrahimi ++pAllocationRequest->itemsToMakeLostCount;
9933*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize += suballoc.size;
9934*b7893ccfSSadaf Ebrahimi }
9935*b7893ccfSSadaf Ebrahimi else
9936*b7893ccfSSadaf Ebrahimi {
9937*b7893ccfSSadaf Ebrahimi return false;
9938*b7893ccfSSadaf Ebrahimi }
9939*b7893ccfSSadaf Ebrahimi }
9940*b7893ccfSSadaf Ebrahimi ++index1st;
9941*b7893ccfSSadaf Ebrahimi }
9942*b7893ccfSSadaf Ebrahimi
9943*b7893ccfSSadaf Ebrahimi // Check next suballocations for BufferImageGranularity conflicts.
9944*b7893ccfSSadaf Ebrahimi // If conflict exists, we must mark more allocations lost or fail.
9945*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
9946*b7893ccfSSadaf Ebrahimi {
9947*b7893ccfSSadaf Ebrahimi while(index1st < suballocations1st.size())
9948*b7893ccfSSadaf Ebrahimi {
9949*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[index1st];
9950*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9951*b7893ccfSSadaf Ebrahimi {
9952*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation != VK_NULL_HANDLE)
9953*b7893ccfSSadaf Ebrahimi {
9954*b7893ccfSSadaf Ebrahimi // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9955*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation->CanBecomeLost() &&
9956*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9957*b7893ccfSSadaf Ebrahimi {
9958*b7893ccfSSadaf Ebrahimi ++pAllocationRequest->itemsToMakeLostCount;
9959*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize += suballoc.size;
9960*b7893ccfSSadaf Ebrahimi }
9961*b7893ccfSSadaf Ebrahimi else
9962*b7893ccfSSadaf Ebrahimi {
9963*b7893ccfSSadaf Ebrahimi return false;
9964*b7893ccfSSadaf Ebrahimi }
9965*b7893ccfSSadaf Ebrahimi }
9966*b7893ccfSSadaf Ebrahimi }
9967*b7893ccfSSadaf Ebrahimi else
9968*b7893ccfSSadaf Ebrahimi {
9969*b7893ccfSSadaf Ebrahimi // Already on next page.
9970*b7893ccfSSadaf Ebrahimi break;
9971*b7893ccfSSadaf Ebrahimi }
9972*b7893ccfSSadaf Ebrahimi ++index1st;
9973*b7893ccfSSadaf Ebrahimi }
9974*b7893ccfSSadaf Ebrahimi }
9975*b7893ccfSSadaf Ebrahimi }
9976*b7893ccfSSadaf Ebrahimi
9977*b7893ccfSSadaf Ebrahimi             // There is enough free space after alignment, before the first used suballocation of the 1st vector (or before the end of the block).
9978*b7893ccfSSadaf Ebrahimi if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9979*b7893ccfSSadaf Ebrahimi (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9980*b7893ccfSSadaf Ebrahimi {
9981*b7893ccfSSadaf Ebrahimi // Check next suballocations for BufferImageGranularity conflicts.
9982*b7893ccfSSadaf Ebrahimi // If conflict exists, allocation cannot be made here.
9983*b7893ccfSSadaf Ebrahimi if(bufferImageGranularity > 1)
9984*b7893ccfSSadaf Ebrahimi {
9985*b7893ccfSSadaf Ebrahimi for(size_t nextSuballocIndex = index1st;
9986*b7893ccfSSadaf Ebrahimi nextSuballocIndex < suballocations1st.size();
9987*b7893ccfSSadaf Ebrahimi nextSuballocIndex++)
9988*b7893ccfSSadaf Ebrahimi {
9989*b7893ccfSSadaf Ebrahimi const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9990*b7893ccfSSadaf Ebrahimi if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9991*b7893ccfSSadaf Ebrahimi {
9992*b7893ccfSSadaf Ebrahimi if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9993*b7893ccfSSadaf Ebrahimi {
9994*b7893ccfSSadaf Ebrahimi return false;
9995*b7893ccfSSadaf Ebrahimi }
9996*b7893ccfSSadaf Ebrahimi }
9997*b7893ccfSSadaf Ebrahimi else
9998*b7893ccfSSadaf Ebrahimi {
9999*b7893ccfSSadaf Ebrahimi // Already on next page.
10000*b7893ccfSSadaf Ebrahimi break;
10001*b7893ccfSSadaf Ebrahimi }
10002*b7893ccfSSadaf Ebrahimi }
10003*b7893ccfSSadaf Ebrahimi }
10004*b7893ccfSSadaf Ebrahimi
10005*b7893ccfSSadaf Ebrahimi // All tests passed: Success.
10006*b7893ccfSSadaf Ebrahimi pAllocationRequest->offset = resultOffset;
10007*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumFreeSize =
10008*b7893ccfSSadaf Ebrahimi (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10009*b7893ccfSSadaf Ebrahimi - resultBaseOffset
10010*b7893ccfSSadaf Ebrahimi - pAllocationRequest->sumItemSize;
10011*b7893ccfSSadaf Ebrahimi // pAllocationRequest->item unused.
10012*b7893ccfSSadaf Ebrahimi return true;
10013*b7893ccfSSadaf Ebrahimi }
10014*b7893ccfSSadaf Ebrahimi }
10015*b7893ccfSSadaf Ebrahimi }
10016*b7893ccfSSadaf Ebrahimi
10017*b7893ccfSSadaf Ebrahimi return false;
10018*b7893ccfSSadaf Ebrahimi }
10019*b7893ccfSSadaf Ebrahimi
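// Called when CreateAllocationRequest() returned a request with
// itemsToMakeLostCount > 0: walks the beginning of the 1st vector and turns the
// colliding, sufficiently old allocations into free items so the wrapped-around
// (ring buffer) allocation can proceed.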
10020*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10021*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
10022*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
10023*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
10024*b7893ccfSSadaf Ebrahimi {
10025*b7893ccfSSadaf Ebrahimi if(pAllocationRequest->itemsToMakeLostCount == 0)
10026*b7893ccfSSadaf Ebrahimi {
10027*b7893ccfSSadaf Ebrahimi return true;
10028*b7893ccfSSadaf Ebrahimi }
10029*b7893ccfSSadaf Ebrahimi
10030*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10031*b7893ccfSSadaf Ebrahimi
10032*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10033*b7893ccfSSadaf Ebrahimi size_t index1st = m_1stNullItemsBeginCount;
10034*b7893ccfSSadaf Ebrahimi size_t madeLostCount = 0;
10035*b7893ccfSSadaf Ebrahimi while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10036*b7893ccfSSadaf Ebrahimi {
10037*b7893ccfSSadaf Ebrahimi VMA_ASSERT(index1st < suballocations1st.size());
10038*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = suballocations1st[index1st];
10039*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10040*b7893ccfSSadaf Ebrahimi {
10041*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10042*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10043*b7893ccfSSadaf Ebrahimi if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10044*b7893ccfSSadaf Ebrahimi {
10045*b7893ccfSSadaf Ebrahimi suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10046*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = VK_NULL_HANDLE;
10047*b7893ccfSSadaf Ebrahimi m_SumFreeSize += suballoc.size;
10048*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsMiddleCount;
10049*b7893ccfSSadaf Ebrahimi ++madeLostCount;
10050*b7893ccfSSadaf Ebrahimi }
10051*b7893ccfSSadaf Ebrahimi else
10052*b7893ccfSSadaf Ebrahimi {
10053*b7893ccfSSadaf Ebrahimi return false;
10054*b7893ccfSSadaf Ebrahimi }
10055*b7893ccfSSadaf Ebrahimi }
10056*b7893ccfSSadaf Ebrahimi ++index1st;
10057*b7893ccfSSadaf Ebrahimi }
10058*b7893ccfSSadaf Ebrahimi
10059*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10060*b7893ccfSSadaf Ebrahimi     //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10061*b7893ccfSSadaf Ebrahimi
10062*b7893ccfSSadaf Ebrahimi return true;
10063*b7893ccfSSadaf Ebrahimi }
10064*b7893ccfSSadaf Ebrahimi
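// Unconditional variant, reached from vmaMakePoolAllocationsLost(): makes lost
// every allocation in this block, in both vectors, that is allowed to become
// lost and is old enough, and returns how many were released.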
10065*b7893ccfSSadaf Ebrahimi uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10066*b7893ccfSSadaf Ebrahimi {
10067*b7893ccfSSadaf Ebrahimi uint32_t lostAllocationCount = 0;
10068*b7893ccfSSadaf Ebrahimi
10069*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10070*b7893ccfSSadaf Ebrahimi for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10071*b7893ccfSSadaf Ebrahimi {
10072*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = suballocations1st[i];
10073*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10074*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->CanBecomeLost() &&
10075*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10076*b7893ccfSSadaf Ebrahimi {
10077*b7893ccfSSadaf Ebrahimi suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10078*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = VK_NULL_HANDLE;
10079*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsMiddleCount;
10080*b7893ccfSSadaf Ebrahimi m_SumFreeSize += suballoc.size;
10081*b7893ccfSSadaf Ebrahimi ++lostAllocationCount;
10082*b7893ccfSSadaf Ebrahimi }
10083*b7893ccfSSadaf Ebrahimi }
10084*b7893ccfSSadaf Ebrahimi
10085*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10086*b7893ccfSSadaf Ebrahimi for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10087*b7893ccfSSadaf Ebrahimi {
10088*b7893ccfSSadaf Ebrahimi VmaSuballocation& suballoc = suballocations2nd[i];
10089*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10090*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->CanBecomeLost() &&
10091*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10092*b7893ccfSSadaf Ebrahimi {
10093*b7893ccfSSadaf Ebrahimi suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10094*b7893ccfSSadaf Ebrahimi suballoc.hAllocation = VK_NULL_HANDLE;
10095*b7893ccfSSadaf Ebrahimi ++m_2ndNullItemsCount;
10096*b7893ccfSSadaf Ebrahimi ++lostAllocationCount;
10097*b7893ccfSSadaf Ebrahimi }
10098*b7893ccfSSadaf Ebrahimi }
10099*b7893ccfSSadaf Ebrahimi
10100*b7893ccfSSadaf Ebrahimi if(lostAllocationCount)
10101*b7893ccfSSadaf Ebrahimi {
10102*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10103*b7893ccfSSadaf Ebrahimi }
10104*b7893ccfSSadaf Ebrahimi
10105*b7893ccfSSadaf Ebrahimi return lostAllocationCount;
10106*b7893ccfSSadaf Ebrahimi }
10107*b7893ccfSSadaf Ebrahimi
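// CheckCorruption() validates the magic values that the allocator writes into
// the VMA_DEBUG_MARGIN bytes before and after each allocation, and is reached
// through the public vmaCheckCorruption() / vmaCheckPoolCorruption() calls.
// Minimal configuration sketch, assuming the macros are defined before this
// header is included in the one translation unit that also defines
// VMA_IMPLEMENTATION (the margin value is only an example; corruption detection
// requires a non-zero margin):
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"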
10108*b7893ccfSSadaf Ebrahimi VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10109*b7893ccfSSadaf Ebrahimi {
10110*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10111*b7893ccfSSadaf Ebrahimi for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10112*b7893ccfSSadaf Ebrahimi {
10113*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations1st[i];
10114*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10115*b7893ccfSSadaf Ebrahimi {
10116*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10117*b7893ccfSSadaf Ebrahimi {
10118*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10119*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
10120*b7893ccfSSadaf Ebrahimi }
10121*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10122*b7893ccfSSadaf Ebrahimi {
10123*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10124*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
10125*b7893ccfSSadaf Ebrahimi }
10126*b7893ccfSSadaf Ebrahimi }
10127*b7893ccfSSadaf Ebrahimi }
10128*b7893ccfSSadaf Ebrahimi
10129*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10130*b7893ccfSSadaf Ebrahimi for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10131*b7893ccfSSadaf Ebrahimi {
10132*b7893ccfSSadaf Ebrahimi const VmaSuballocation& suballoc = suballocations2nd[i];
10133*b7893ccfSSadaf Ebrahimi if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10134*b7893ccfSSadaf Ebrahimi {
10135*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10136*b7893ccfSSadaf Ebrahimi {
10137*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10138*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
10139*b7893ccfSSadaf Ebrahimi }
10140*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10141*b7893ccfSSadaf Ebrahimi {
10142*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10143*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
10144*b7893ccfSSadaf Ebrahimi }
10145*b7893ccfSSadaf Ebrahimi }
10146*b7893ccfSSadaf Ebrahimi }
10147*b7893ccfSSadaf Ebrahimi
10148*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
10149*b7893ccfSSadaf Ebrahimi }
10150*b7893ccfSSadaf Ebrahimi
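// Alloc() commits a request produced by CreateAllocationRequest(), mirroring its
// three placement cases: push onto the 2nd vector as the upper stack
// (upperAddress), append to the end of the 1st vector, or push onto the 2nd
// vector as the wrapped-around part of the 2-part ring buffer.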
10151*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::Alloc(
10152*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
10153*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
10154*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
10155*b7893ccfSSadaf Ebrahimi bool upperAddress,
10156*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation)
10157*b7893ccfSSadaf Ebrahimi {
10158*b7893ccfSSadaf Ebrahimi const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10159*b7893ccfSSadaf Ebrahimi
10160*b7893ccfSSadaf Ebrahimi if(upperAddress)
10161*b7893ccfSSadaf Ebrahimi {
10162*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10163*b7893ccfSSadaf Ebrahimi "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10164*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10165*b7893ccfSSadaf Ebrahimi suballocations2nd.push_back(newSuballoc);
10166*b7893ccfSSadaf Ebrahimi m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10167*b7893ccfSSadaf Ebrahimi }
10168*b7893ccfSSadaf Ebrahimi else
10169*b7893ccfSSadaf Ebrahimi {
10170*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10171*b7893ccfSSadaf Ebrahimi
10172*b7893ccfSSadaf Ebrahimi // First allocation.
10173*b7893ccfSSadaf Ebrahimi if(suballocations1st.empty())
10174*b7893ccfSSadaf Ebrahimi {
10175*b7893ccfSSadaf Ebrahimi suballocations1st.push_back(newSuballoc);
10176*b7893ccfSSadaf Ebrahimi }
10177*b7893ccfSSadaf Ebrahimi else
10178*b7893ccfSSadaf Ebrahimi {
10179*b7893ccfSSadaf Ebrahimi // New allocation at the end of 1st vector.
10180*b7893ccfSSadaf Ebrahimi if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10181*b7893ccfSSadaf Ebrahimi {
10182*b7893ccfSSadaf Ebrahimi // Check if it fits before the end of the block.
10183*b7893ccfSSadaf Ebrahimi VMA_ASSERT(request.offset + allocSize <= GetSize());
10184*b7893ccfSSadaf Ebrahimi suballocations1st.push_back(newSuballoc);
10185*b7893ccfSSadaf Ebrahimi }
10186*b7893ccfSSadaf Ebrahimi // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10187*b7893ccfSSadaf Ebrahimi else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10188*b7893ccfSSadaf Ebrahimi {
10189*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10190*b7893ccfSSadaf Ebrahimi
10191*b7893ccfSSadaf Ebrahimi switch(m_2ndVectorMode)
10192*b7893ccfSSadaf Ebrahimi {
10193*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_EMPTY:
10194*b7893ccfSSadaf Ebrahimi // First allocation from second part ring buffer.
10195*b7893ccfSSadaf Ebrahimi VMA_ASSERT(suballocations2nd.empty());
10196*b7893ccfSSadaf Ebrahimi m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10197*b7893ccfSSadaf Ebrahimi break;
10198*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_RING_BUFFER:
10199*b7893ccfSSadaf Ebrahimi // 2-part ring buffer is already started.
10200*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!suballocations2nd.empty());
10201*b7893ccfSSadaf Ebrahimi break;
10202*b7893ccfSSadaf Ebrahimi case SECOND_VECTOR_DOUBLE_STACK:
10203*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10204*b7893ccfSSadaf Ebrahimi break;
10205*b7893ccfSSadaf Ebrahimi default:
10206*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
10207*b7893ccfSSadaf Ebrahimi }
10208*b7893ccfSSadaf Ebrahimi
10209*b7893ccfSSadaf Ebrahimi suballocations2nd.push_back(newSuballoc);
10210*b7893ccfSSadaf Ebrahimi }
10211*b7893ccfSSadaf Ebrahimi else
10212*b7893ccfSSadaf Ebrahimi {
10213*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10214*b7893ccfSSadaf Ebrahimi }
10215*b7893ccfSSadaf Ebrahimi }
10216*b7893ccfSSadaf Ebrahimi }
10217*b7893ccfSSadaf Ebrahimi
10218*b7893ccfSSadaf Ebrahimi m_SumFreeSize -= newSuballoc.size;
10219*b7893ccfSSadaf Ebrahimi }
10220*b7893ccfSSadaf Ebrahimi
10221*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10222*b7893ccfSSadaf Ebrahimi {
10223*b7893ccfSSadaf Ebrahimi FreeAtOffset(allocation->GetOffset());
10224*b7893ccfSSadaf Ebrahimi }
10225*b7893ccfSSadaf Ebrahimi
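// FreeAtOffset() tries the cheap cases first (the oldest item of the 1st
// vector, the last item of the 2nd vector, the last item of the 1st vector) and
// only then falls back to a binary search (VmaVectorFindSorted) through the
// middle of either vector. Every successful path ends in CleanupAfterFree(),
// which compacts the vectors once enough null items accumulate (see
// ShouldCompact1st()).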
10226*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10227*b7893ccfSSadaf Ebrahimi {
10228*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10229*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10230*b7893ccfSSadaf Ebrahimi
10231*b7893ccfSSadaf Ebrahimi if(!suballocations1st.empty())
10232*b7893ccfSSadaf Ebrahimi {
10233*b7893ccfSSadaf Ebrahimi         // If it is the first (oldest) allocation of the 1st vector: mark it as empty at the beginning.
10234*b7893ccfSSadaf Ebrahimi VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10235*b7893ccfSSadaf Ebrahimi if(firstSuballoc.offset == offset)
10236*b7893ccfSSadaf Ebrahimi {
10237*b7893ccfSSadaf Ebrahimi firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10238*b7893ccfSSadaf Ebrahimi firstSuballoc.hAllocation = VK_NULL_HANDLE;
10239*b7893ccfSSadaf Ebrahimi m_SumFreeSize += firstSuballoc.size;
10240*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsBeginCount;
10241*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10242*b7893ccfSSadaf Ebrahimi return;
10243*b7893ccfSSadaf Ebrahimi }
10244*b7893ccfSSadaf Ebrahimi }
10245*b7893ccfSSadaf Ebrahimi
10246*b7893ccfSSadaf Ebrahimi // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10247*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10248*b7893ccfSSadaf Ebrahimi m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10249*b7893ccfSSadaf Ebrahimi {
10250*b7893ccfSSadaf Ebrahimi VmaSuballocation& lastSuballoc = suballocations2nd.back();
10251*b7893ccfSSadaf Ebrahimi if(lastSuballoc.offset == offset)
10252*b7893ccfSSadaf Ebrahimi {
10253*b7893ccfSSadaf Ebrahimi m_SumFreeSize += lastSuballoc.size;
10254*b7893ccfSSadaf Ebrahimi suballocations2nd.pop_back();
10255*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10256*b7893ccfSSadaf Ebrahimi return;
10257*b7893ccfSSadaf Ebrahimi }
10258*b7893ccfSSadaf Ebrahimi }
10259*b7893ccfSSadaf Ebrahimi // Last allocation in 1st vector.
10260*b7893ccfSSadaf Ebrahimi else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10261*b7893ccfSSadaf Ebrahimi {
10262*b7893ccfSSadaf Ebrahimi VmaSuballocation& lastSuballoc = suballocations1st.back();
10263*b7893ccfSSadaf Ebrahimi if(lastSuballoc.offset == offset)
10264*b7893ccfSSadaf Ebrahimi {
10265*b7893ccfSSadaf Ebrahimi m_SumFreeSize += lastSuballoc.size;
10266*b7893ccfSSadaf Ebrahimi suballocations1st.pop_back();
10267*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10268*b7893ccfSSadaf Ebrahimi return;
10269*b7893ccfSSadaf Ebrahimi }
10270*b7893ccfSSadaf Ebrahimi }
10271*b7893ccfSSadaf Ebrahimi
10272*b7893ccfSSadaf Ebrahimi // Item from the middle of 1st vector.
10273*b7893ccfSSadaf Ebrahimi {
10274*b7893ccfSSadaf Ebrahimi VmaSuballocation refSuballoc;
10275*b7893ccfSSadaf Ebrahimi refSuballoc.offset = offset;
10276*b7893ccfSSadaf Ebrahimi // Rest of members stays uninitialized intentionally for better performance.
10277*b7893ccfSSadaf Ebrahimi SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10278*b7893ccfSSadaf Ebrahimi suballocations1st.begin() + m_1stNullItemsBeginCount,
10279*b7893ccfSSadaf Ebrahimi suballocations1st.end(),
10280*b7893ccfSSadaf Ebrahimi refSuballoc);
10281*b7893ccfSSadaf Ebrahimi if(it != suballocations1st.end())
10282*b7893ccfSSadaf Ebrahimi {
10283*b7893ccfSSadaf Ebrahimi it->type = VMA_SUBALLOCATION_TYPE_FREE;
10284*b7893ccfSSadaf Ebrahimi it->hAllocation = VK_NULL_HANDLE;
10285*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsMiddleCount;
10286*b7893ccfSSadaf Ebrahimi m_SumFreeSize += it->size;
10287*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10288*b7893ccfSSadaf Ebrahimi return;
10289*b7893ccfSSadaf Ebrahimi }
10290*b7893ccfSSadaf Ebrahimi }
10291*b7893ccfSSadaf Ebrahimi
10292*b7893ccfSSadaf Ebrahimi if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10293*b7893ccfSSadaf Ebrahimi {
10294*b7893ccfSSadaf Ebrahimi // Item from the middle of 2nd vector.
10295*b7893ccfSSadaf Ebrahimi VmaSuballocation refSuballoc;
10296*b7893ccfSSadaf Ebrahimi refSuballoc.offset = offset;
10297*b7893ccfSSadaf Ebrahimi // Rest of members stays uninitialized intentionally for better performance.
10298*b7893ccfSSadaf Ebrahimi SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10299*b7893ccfSSadaf Ebrahimi VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10300*b7893ccfSSadaf Ebrahimi VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10301*b7893ccfSSadaf Ebrahimi if(it != suballocations2nd.end())
10302*b7893ccfSSadaf Ebrahimi {
10303*b7893ccfSSadaf Ebrahimi it->type = VMA_SUBALLOCATION_TYPE_FREE;
10304*b7893ccfSSadaf Ebrahimi it->hAllocation = VK_NULL_HANDLE;
10305*b7893ccfSSadaf Ebrahimi ++m_2ndNullItemsCount;
10306*b7893ccfSSadaf Ebrahimi m_SumFreeSize += it->size;
10307*b7893ccfSSadaf Ebrahimi CleanupAfterFree();
10308*b7893ccfSSadaf Ebrahimi return;
10309*b7893ccfSSadaf Ebrahimi }
10310*b7893ccfSSadaf Ebrahimi }
10311*b7893ccfSSadaf Ebrahimi
10312*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10313*b7893ccfSSadaf Ebrahimi }
10314*b7893ccfSSadaf Ebrahimi
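// Heuristic: compacting the 1st vector pays off only once it holds more than 32
// suballocations and free (null) items outnumber used items by at least 3:2.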
10315*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10316*b7893ccfSSadaf Ebrahimi {
10317*b7893ccfSSadaf Ebrahimi const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10318*b7893ccfSSadaf Ebrahimi const size_t suballocCount = AccessSuballocations1st().size();
10319*b7893ccfSSadaf Ebrahimi return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10320*b7893ccfSSadaf Ebrahimi }
10321*b7893ccfSSadaf Ebrahimi
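// Housekeeping after freeing a suballocation: resets all state if the block became empty;
// otherwise trims null items from the edges of both vectors, compacts the 1st vector when
// ShouldCompact1st() says so, and, when the 1st vector drains in ring-buffer mode,
// swaps it with the 2nd one.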
10322*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Linear::CleanupAfterFree()
10323*b7893ccfSSadaf Ebrahimi {
10324*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10325*b7893ccfSSadaf Ebrahimi SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10326*b7893ccfSSadaf Ebrahimi
10327*b7893ccfSSadaf Ebrahimi if(IsEmpty())
10328*b7893ccfSSadaf Ebrahimi {
10329*b7893ccfSSadaf Ebrahimi suballocations1st.clear();
10330*b7893ccfSSadaf Ebrahimi suballocations2nd.clear();
10331*b7893ccfSSadaf Ebrahimi m_1stNullItemsBeginCount = 0;
10332*b7893ccfSSadaf Ebrahimi m_1stNullItemsMiddleCount = 0;
10333*b7893ccfSSadaf Ebrahimi m_2ndNullItemsCount = 0;
10334*b7893ccfSSadaf Ebrahimi m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10335*b7893ccfSSadaf Ebrahimi }
10336*b7893ccfSSadaf Ebrahimi else
10337*b7893ccfSSadaf Ebrahimi {
10338*b7893ccfSSadaf Ebrahimi const size_t suballoc1stCount = suballocations1st.size();
10339*b7893ccfSSadaf Ebrahimi const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10340*b7893ccfSSadaf Ebrahimi VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10341*b7893ccfSSadaf Ebrahimi
10342*b7893ccfSSadaf Ebrahimi // Find more null items at the beginning of 1st vector.
10343*b7893ccfSSadaf Ebrahimi while(m_1stNullItemsBeginCount < suballoc1stCount &&
10344*b7893ccfSSadaf Ebrahimi suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10345*b7893ccfSSadaf Ebrahimi {
10346*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsBeginCount;
10347*b7893ccfSSadaf Ebrahimi --m_1stNullItemsMiddleCount;
10348*b7893ccfSSadaf Ebrahimi }
10349*b7893ccfSSadaf Ebrahimi
10350*b7893ccfSSadaf Ebrahimi // Find more null items at the end of 1st vector.
10351*b7893ccfSSadaf Ebrahimi while(m_1stNullItemsMiddleCount > 0 &&
10352*b7893ccfSSadaf Ebrahimi suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10353*b7893ccfSSadaf Ebrahimi {
10354*b7893ccfSSadaf Ebrahimi --m_1stNullItemsMiddleCount;
10355*b7893ccfSSadaf Ebrahimi suballocations1st.pop_back();
10356*b7893ccfSSadaf Ebrahimi }
10357*b7893ccfSSadaf Ebrahimi
10358*b7893ccfSSadaf Ebrahimi // Find more null items at the end of 2nd vector.
10359*b7893ccfSSadaf Ebrahimi while(m_2ndNullItemsCount > 0 &&
10360*b7893ccfSSadaf Ebrahimi suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10361*b7893ccfSSadaf Ebrahimi {
10362*b7893ccfSSadaf Ebrahimi --m_2ndNullItemsCount;
10363*b7893ccfSSadaf Ebrahimi suballocations2nd.pop_back();
10364*b7893ccfSSadaf Ebrahimi }
10365*b7893ccfSSadaf Ebrahimi
10366*b7893ccfSSadaf Ebrahimi if(ShouldCompact1st())
10367*b7893ccfSSadaf Ebrahimi {
10368*b7893ccfSSadaf Ebrahimi const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10369*b7893ccfSSadaf Ebrahimi size_t srcIndex = m_1stNullItemsBeginCount;
10370*b7893ccfSSadaf Ebrahimi for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10371*b7893ccfSSadaf Ebrahimi {
10372*b7893ccfSSadaf Ebrahimi while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10373*b7893ccfSSadaf Ebrahimi {
10374*b7893ccfSSadaf Ebrahimi ++srcIndex;
10375*b7893ccfSSadaf Ebrahimi }
10376*b7893ccfSSadaf Ebrahimi if(dstIndex != srcIndex)
10377*b7893ccfSSadaf Ebrahimi {
10378*b7893ccfSSadaf Ebrahimi suballocations1st[dstIndex] = suballocations1st[srcIndex];
10379*b7893ccfSSadaf Ebrahimi }
10380*b7893ccfSSadaf Ebrahimi ++srcIndex;
10381*b7893ccfSSadaf Ebrahimi }
10382*b7893ccfSSadaf Ebrahimi suballocations1st.resize(nonNullItemCount);
10383*b7893ccfSSadaf Ebrahimi m_1stNullItemsBeginCount = 0;
10384*b7893ccfSSadaf Ebrahimi m_1stNullItemsMiddleCount = 0;
10385*b7893ccfSSadaf Ebrahimi }
10386*b7893ccfSSadaf Ebrahimi
10387*b7893ccfSSadaf Ebrahimi // 2nd vector became empty.
10388*b7893ccfSSadaf Ebrahimi if(suballocations2nd.empty())
10389*b7893ccfSSadaf Ebrahimi {
10390*b7893ccfSSadaf Ebrahimi m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10391*b7893ccfSSadaf Ebrahimi }
10392*b7893ccfSSadaf Ebrahimi
10393*b7893ccfSSadaf Ebrahimi // 1st vector became empty.
10394*b7893ccfSSadaf Ebrahimi if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10395*b7893ccfSSadaf Ebrahimi {
10396*b7893ccfSSadaf Ebrahimi suballocations1st.clear();
10397*b7893ccfSSadaf Ebrahimi m_1stNullItemsBeginCount = 0;
10398*b7893ccfSSadaf Ebrahimi
10399*b7893ccfSSadaf Ebrahimi if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10400*b7893ccfSSadaf Ebrahimi {
10401*b7893ccfSSadaf Ebrahimi // Swap 1st with 2nd. Now 2nd is empty.
10402*b7893ccfSSadaf Ebrahimi m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10403*b7893ccfSSadaf Ebrahimi m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10404*b7893ccfSSadaf Ebrahimi while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10405*b7893ccfSSadaf Ebrahimi suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10406*b7893ccfSSadaf Ebrahimi {
10407*b7893ccfSSadaf Ebrahimi ++m_1stNullItemsBeginCount;
10408*b7893ccfSSadaf Ebrahimi --m_1stNullItemsMiddleCount;
10409*b7893ccfSSadaf Ebrahimi }
10410*b7893ccfSSadaf Ebrahimi m_2ndNullItemsCount = 0;
10411*b7893ccfSSadaf Ebrahimi m_1stVectorIndex ^= 1;
10412*b7893ccfSSadaf Ebrahimi }
10413*b7893ccfSSadaf Ebrahimi }
10414*b7893ccfSSadaf Ebrahimi }
10415*b7893ccfSSadaf Ebrahimi
10416*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(Validate());
10417*b7893ccfSSadaf Ebrahimi }
10418*b7893ccfSSadaf Ebrahimi
10419*b7893ccfSSadaf Ebrahimi
10420*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
10421*b7893ccfSSadaf Ebrahimi // class VmaBlockMetadata_Buddy
10422*b7893ccfSSadaf Ebrahimi
10423*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10424*b7893ccfSSadaf Ebrahimi VmaBlockMetadata(hAllocator),
10425*b7893ccfSSadaf Ebrahimi m_Root(VMA_NULL),
10426*b7893ccfSSadaf Ebrahimi m_AllocationCount(0),
10427*b7893ccfSSadaf Ebrahimi m_FreeCount(1),
10428*b7893ccfSSadaf Ebrahimi m_SumFreeSize(0)
10429*b7893ccfSSadaf Ebrahimi {
10430*b7893ccfSSadaf Ebrahimi memset(m_FreeList, 0, sizeof(m_FreeList));
10431*b7893ccfSSadaf Ebrahimi }
10432*b7893ccfSSadaf Ebrahimi
10433*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10434*b7893ccfSSadaf Ebrahimi {
10435*b7893ccfSSadaf Ebrahimi DeleteNode(m_Root);
10436*b7893ccfSSadaf Ebrahimi }
10437*b7893ccfSSadaf Ebrahimi
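// The buddy allocator operates on a power-of-2 region: m_UsableSize is the block size
// rounded down to the previous power of 2, and the tail beyond it is reported as
// unusable space in the statistics.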
10438*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10439*b7893ccfSSadaf Ebrahimi {
10440*b7893ccfSSadaf Ebrahimi VmaBlockMetadata::Init(size);
10441*b7893ccfSSadaf Ebrahimi
10442*b7893ccfSSadaf Ebrahimi m_UsableSize = VmaPrevPow2(size);
10443*b7893ccfSSadaf Ebrahimi m_SumFreeSize = m_UsableSize;
10444*b7893ccfSSadaf Ebrahimi
10445*b7893ccfSSadaf Ebrahimi // Calculate m_LevelCount.
10446*b7893ccfSSadaf Ebrahimi m_LevelCount = 1;
10447*b7893ccfSSadaf Ebrahimi while(m_LevelCount < MAX_LEVELS &&
10448*b7893ccfSSadaf Ebrahimi LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10449*b7893ccfSSadaf Ebrahimi {
10450*b7893ccfSSadaf Ebrahimi ++m_LevelCount;
10451*b7893ccfSSadaf Ebrahimi }
10452*b7893ccfSSadaf Ebrahimi
10453*b7893ccfSSadaf Ebrahimi Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10454*b7893ccfSSadaf Ebrahimi rootNode->offset = 0;
10455*b7893ccfSSadaf Ebrahimi rootNode->type = Node::TYPE_FREE;
10456*b7893ccfSSadaf Ebrahimi rootNode->parent = VMA_NULL;
10457*b7893ccfSSadaf Ebrahimi rootNode->buddy = VMA_NULL;
10458*b7893ccfSSadaf Ebrahimi
10459*b7893ccfSSadaf Ebrahimi m_Root = rootNode;
10460*b7893ccfSSadaf Ebrahimi AddToFreeListFront(0, rootNode);
10461*b7893ccfSSadaf Ebrahimi }
10462*b7893ccfSSadaf Ebrahimi
10463*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Buddy::Validate() const
10464*b7893ccfSSadaf Ebrahimi {
10465*b7893ccfSSadaf Ebrahimi // Validate tree.
10466*b7893ccfSSadaf Ebrahimi ValidationContext ctx;
10467*b7893ccfSSadaf Ebrahimi if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10468*b7893ccfSSadaf Ebrahimi {
10469*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(false && "ValidateNode failed.");
10470*b7893ccfSSadaf Ebrahimi }
10471*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10472*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10473*b7893ccfSSadaf Ebrahimi
10474*b7893ccfSSadaf Ebrahimi // Validate free node lists.
10475*b7893ccfSSadaf Ebrahimi for(uint32_t level = 0; level < m_LevelCount; ++level)
10476*b7893ccfSSadaf Ebrahimi {
10477*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10478*b7893ccfSSadaf Ebrahimi m_FreeList[level].front->free.prev == VMA_NULL);
10479*b7893ccfSSadaf Ebrahimi
10480*b7893ccfSSadaf Ebrahimi for(Node* node = m_FreeList[level].front;
10481*b7893ccfSSadaf Ebrahimi node != VMA_NULL;
10482*b7893ccfSSadaf Ebrahimi node = node->free.next)
10483*b7893ccfSSadaf Ebrahimi {
10484*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(node->type == Node::TYPE_FREE);
10485*b7893ccfSSadaf Ebrahimi
10486*b7893ccfSSadaf Ebrahimi if(node->free.next == VMA_NULL)
10487*b7893ccfSSadaf Ebrahimi {
10488*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_FreeList[level].back == node);
10489*b7893ccfSSadaf Ebrahimi }
10490*b7893ccfSSadaf Ebrahimi else
10491*b7893ccfSSadaf Ebrahimi {
10492*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(node->free.next->free.prev == node);
10493*b7893ccfSSadaf Ebrahimi }
10494*b7893ccfSSadaf Ebrahimi }
10495*b7893ccfSSadaf Ebrahimi }
10496*b7893ccfSSadaf Ebrahimi
10497*b7893ccfSSadaf Ebrahimi     // Validate that free lists at higher levels are empty.
10498*b7893ccfSSadaf Ebrahimi for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10499*b7893ccfSSadaf Ebrahimi {
10500*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10501*b7893ccfSSadaf Ebrahimi }
10502*b7893ccfSSadaf Ebrahimi
10503*b7893ccfSSadaf Ebrahimi return true;
10504*b7893ccfSSadaf Ebrahimi }
10505*b7893ccfSSadaf Ebrahimi
10506*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10507*b7893ccfSSadaf Ebrahimi {
10508*b7893ccfSSadaf Ebrahimi for(uint32_t level = 0; level < m_LevelCount; ++level)
10509*b7893ccfSSadaf Ebrahimi {
10510*b7893ccfSSadaf Ebrahimi if(m_FreeList[level].front != VMA_NULL)
10511*b7893ccfSSadaf Ebrahimi {
10512*b7893ccfSSadaf Ebrahimi return LevelToNodeSize(level);
10513*b7893ccfSSadaf Ebrahimi }
10514*b7893ccfSSadaf Ebrahimi }
10515*b7893ccfSSadaf Ebrahimi return 0;
10516*b7893ccfSSadaf Ebrahimi }
10517*b7893ccfSSadaf Ebrahimi
10518*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10519*b7893ccfSSadaf Ebrahimi {
10520*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusableSize = GetUnusableSize();
10521*b7893ccfSSadaf Ebrahimi
10522*b7893ccfSSadaf Ebrahimi outInfo.blockCount = 1;
10523*b7893ccfSSadaf Ebrahimi
10524*b7893ccfSSadaf Ebrahimi outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10525*b7893ccfSSadaf Ebrahimi outInfo.usedBytes = outInfo.unusedBytes = 0;
10526*b7893ccfSSadaf Ebrahimi
10527*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10528*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10529*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10530*b7893ccfSSadaf Ebrahimi
10531*b7893ccfSSadaf Ebrahimi CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10532*b7893ccfSSadaf Ebrahimi
10533*b7893ccfSSadaf Ebrahimi if(unusableSize > 0)
10534*b7893ccfSSadaf Ebrahimi {
10535*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
10536*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusableSize;
10537*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10538*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10539*b7893ccfSSadaf Ebrahimi }
10540*b7893ccfSSadaf Ebrahimi }
10541*b7893ccfSSadaf Ebrahimi
10542*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10543*b7893ccfSSadaf Ebrahimi {
10544*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusableSize = GetUnusableSize();
10545*b7893ccfSSadaf Ebrahimi
10546*b7893ccfSSadaf Ebrahimi inoutStats.size += GetSize();
10547*b7893ccfSSadaf Ebrahimi inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10548*b7893ccfSSadaf Ebrahimi inoutStats.allocationCount += m_AllocationCount;
10549*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeCount += m_FreeCount;
10550*b7893ccfSSadaf Ebrahimi inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10551*b7893ccfSSadaf Ebrahimi
10552*b7893ccfSSadaf Ebrahimi if(unusableSize > 0)
10553*b7893ccfSSadaf Ebrahimi {
10554*b7893ccfSSadaf Ebrahimi ++inoutStats.unusedRangeCount;
10555*b7893ccfSSadaf Ebrahimi // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10556*b7893ccfSSadaf Ebrahimi }
10557*b7893ccfSSadaf Ebrahimi }
10558*b7893ccfSSadaf Ebrahimi
10559*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
10560*b7893ccfSSadaf Ebrahimi
10561*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10562*b7893ccfSSadaf Ebrahimi {
10563*b7893ccfSSadaf Ebrahimi // TODO optimize
10564*b7893ccfSSadaf Ebrahimi VmaStatInfo stat;
10565*b7893ccfSSadaf Ebrahimi CalcAllocationStatInfo(stat);
10566*b7893ccfSSadaf Ebrahimi
10567*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Begin(
10568*b7893ccfSSadaf Ebrahimi json,
10569*b7893ccfSSadaf Ebrahimi stat.unusedBytes,
10570*b7893ccfSSadaf Ebrahimi stat.allocationCount,
10571*b7893ccfSSadaf Ebrahimi stat.unusedRangeCount);
10572*b7893ccfSSadaf Ebrahimi
10573*b7893ccfSSadaf Ebrahimi PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10574*b7893ccfSSadaf Ebrahimi
10575*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusableSize = GetUnusableSize();
10576*b7893ccfSSadaf Ebrahimi if(unusableSize > 0)
10577*b7893ccfSSadaf Ebrahimi {
10578*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json,
10579*b7893ccfSSadaf Ebrahimi m_UsableSize, // offset
10580*b7893ccfSSadaf Ebrahimi unusableSize); // size
10581*b7893ccfSSadaf Ebrahimi }
10582*b7893ccfSSadaf Ebrahimi
10583*b7893ccfSSadaf Ebrahimi PrintDetailedMap_End(json);
10584*b7893ccfSSadaf Ebrahimi }
10585*b7893ccfSSadaf Ebrahimi
10586*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
10587*b7893ccfSSadaf Ebrahimi
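// Scans the free lists starting at the level matching allocSize and moving toward the
// root, returning the first free node whose offset satisfies allocAlignment.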
10588*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10589*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
10590*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
10591*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
10592*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
10593*b7893ccfSSadaf Ebrahimi VkDeviceSize allocAlignment,
10594*b7893ccfSSadaf Ebrahimi bool upperAddress,
10595*b7893ccfSSadaf Ebrahimi VmaSuballocationType allocType,
10596*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost,
10597*b7893ccfSSadaf Ebrahimi uint32_t strategy,
10598*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
10599*b7893ccfSSadaf Ebrahimi {
10600*b7893ccfSSadaf Ebrahimi VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10601*b7893ccfSSadaf Ebrahimi
10602*b7893ccfSSadaf Ebrahimi // Simple way to respect bufferImageGranularity. May be optimized some day.
10603*b7893ccfSSadaf Ebrahimi // Whenever it might be an OPTIMAL image...
10604*b7893ccfSSadaf Ebrahimi if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10605*b7893ccfSSadaf Ebrahimi allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10606*b7893ccfSSadaf Ebrahimi allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10607*b7893ccfSSadaf Ebrahimi {
10608*b7893ccfSSadaf Ebrahimi allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10609*b7893ccfSSadaf Ebrahimi allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10610*b7893ccfSSadaf Ebrahimi }
10611*b7893ccfSSadaf Ebrahimi
10612*b7893ccfSSadaf Ebrahimi if(allocSize > m_UsableSize)
10613*b7893ccfSSadaf Ebrahimi {
10614*b7893ccfSSadaf Ebrahimi return false;
10615*b7893ccfSSadaf Ebrahimi }
10616*b7893ccfSSadaf Ebrahimi
10617*b7893ccfSSadaf Ebrahimi const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10618*b7893ccfSSadaf Ebrahimi for(uint32_t level = targetLevel + 1; level--; )
10619*b7893ccfSSadaf Ebrahimi {
10620*b7893ccfSSadaf Ebrahimi for(Node* freeNode = m_FreeList[level].front;
10621*b7893ccfSSadaf Ebrahimi freeNode != VMA_NULL;
10622*b7893ccfSSadaf Ebrahimi freeNode = freeNode->free.next)
10623*b7893ccfSSadaf Ebrahimi {
10624*b7893ccfSSadaf Ebrahimi if(freeNode->offset % allocAlignment == 0)
10625*b7893ccfSSadaf Ebrahimi {
10626*b7893ccfSSadaf Ebrahimi pAllocationRequest->offset = freeNode->offset;
10627*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10628*b7893ccfSSadaf Ebrahimi pAllocationRequest->sumItemSize = 0;
10629*b7893ccfSSadaf Ebrahimi pAllocationRequest->itemsToMakeLostCount = 0;
10630*b7893ccfSSadaf Ebrahimi pAllocationRequest->customData = (void*)(uintptr_t)level;
10631*b7893ccfSSadaf Ebrahimi return true;
10632*b7893ccfSSadaf Ebrahimi }
10633*b7893ccfSSadaf Ebrahimi }
10634*b7893ccfSSadaf Ebrahimi }
10635*b7893ccfSSadaf Ebrahimi
10636*b7893ccfSSadaf Ebrahimi return false;
10637*b7893ccfSSadaf Ebrahimi }
10638*b7893ccfSSadaf Ebrahimi
10639*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10640*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
10641*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
10642*b7893ccfSSadaf Ebrahimi VmaAllocationRequest* pAllocationRequest)
10643*b7893ccfSSadaf Ebrahimi {
10644*b7893ccfSSadaf Ebrahimi /*
10645*b7893ccfSSadaf Ebrahimi Lost allocations are not supported in buddy allocator at the moment.
10646*b7893ccfSSadaf Ebrahimi Support might be added in the future.
10647*b7893ccfSSadaf Ebrahimi */
10648*b7893ccfSSadaf Ebrahimi return pAllocationRequest->itemsToMakeLostCount == 0;
10649*b7893ccfSSadaf Ebrahimi }
10650*b7893ccfSSadaf Ebrahimi
10651*b7893ccfSSadaf Ebrahimi uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10652*b7893ccfSSadaf Ebrahimi {
10653*b7893ccfSSadaf Ebrahimi /*
10654*b7893ccfSSadaf Ebrahimi Lost allocations are not supported in buddy allocator at the moment.
10655*b7893ccfSSadaf Ebrahimi Support might be added in the future.
10656*b7893ccfSSadaf Ebrahimi */
10657*b7893ccfSSadaf Ebrahimi return 0;
10658*b7893ccfSSadaf Ebrahimi }
10659*b7893ccfSSadaf Ebrahimi
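// Consumes an allocation request produced by CreateAllocationRequest: locates the free
// node at request.offset on the level stored in request.customData, splits it down to
// the level matching allocSize, and converts the resulting node into an allocation.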
10660*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::Alloc(
10661*b7893ccfSSadaf Ebrahimi const VmaAllocationRequest& request,
10662*b7893ccfSSadaf Ebrahimi VmaSuballocationType type,
10663*b7893ccfSSadaf Ebrahimi VkDeviceSize allocSize,
10664*b7893ccfSSadaf Ebrahimi bool upperAddress,
10665*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation)
10666*b7893ccfSSadaf Ebrahimi {
10667*b7893ccfSSadaf Ebrahimi const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10668*b7893ccfSSadaf Ebrahimi uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10669*b7893ccfSSadaf Ebrahimi
10670*b7893ccfSSadaf Ebrahimi Node* currNode = m_FreeList[currLevel].front;
10671*b7893ccfSSadaf Ebrahimi VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10672*b7893ccfSSadaf Ebrahimi while(currNode->offset != request.offset)
10673*b7893ccfSSadaf Ebrahimi {
10674*b7893ccfSSadaf Ebrahimi currNode = currNode->free.next;
10675*b7893ccfSSadaf Ebrahimi VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10676*b7893ccfSSadaf Ebrahimi }
10677*b7893ccfSSadaf Ebrahimi
10678*b7893ccfSSadaf Ebrahimi // Go down, splitting free nodes.
10679*b7893ccfSSadaf Ebrahimi while(currLevel < targetLevel)
10680*b7893ccfSSadaf Ebrahimi {
10681*b7893ccfSSadaf Ebrahimi         // currNode is already the first free node at currLevel.
10682*b7893ccfSSadaf Ebrahimi         // Remove it from the list of free nodes at this level.
10683*b7893ccfSSadaf Ebrahimi RemoveFromFreeList(currLevel, currNode);
10684*b7893ccfSSadaf Ebrahimi
10685*b7893ccfSSadaf Ebrahimi const uint32_t childrenLevel = currLevel + 1;
10686*b7893ccfSSadaf Ebrahimi
10687*b7893ccfSSadaf Ebrahimi // Create two free sub-nodes.
10688*b7893ccfSSadaf Ebrahimi Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10689*b7893ccfSSadaf Ebrahimi Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10690*b7893ccfSSadaf Ebrahimi
10691*b7893ccfSSadaf Ebrahimi leftChild->offset = currNode->offset;
10692*b7893ccfSSadaf Ebrahimi leftChild->type = Node::TYPE_FREE;
10693*b7893ccfSSadaf Ebrahimi leftChild->parent = currNode;
10694*b7893ccfSSadaf Ebrahimi leftChild->buddy = rightChild;
10695*b7893ccfSSadaf Ebrahimi
10696*b7893ccfSSadaf Ebrahimi rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10697*b7893ccfSSadaf Ebrahimi rightChild->type = Node::TYPE_FREE;
10698*b7893ccfSSadaf Ebrahimi rightChild->parent = currNode;
10699*b7893ccfSSadaf Ebrahimi rightChild->buddy = leftChild;
10700*b7893ccfSSadaf Ebrahimi
10701*b7893ccfSSadaf Ebrahimi // Convert current currNode to split type.
10702*b7893ccfSSadaf Ebrahimi currNode->type = Node::TYPE_SPLIT;
10703*b7893ccfSSadaf Ebrahimi currNode->split.leftChild = leftChild;
10704*b7893ccfSSadaf Ebrahimi
10705*b7893ccfSSadaf Ebrahimi // Add child nodes to free list. Order is important!
10706*b7893ccfSSadaf Ebrahimi AddToFreeListFront(childrenLevel, rightChild);
10707*b7893ccfSSadaf Ebrahimi AddToFreeListFront(childrenLevel, leftChild);
10708*b7893ccfSSadaf Ebrahimi
10709*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
10710*b7893ccfSSadaf Ebrahimi //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10711*b7893ccfSSadaf Ebrahimi ++currLevel;
10712*b7893ccfSSadaf Ebrahimi currNode = m_FreeList[currLevel].front;
10713*b7893ccfSSadaf Ebrahimi
10714*b7893ccfSSadaf Ebrahimi /*
10715*b7893ccfSSadaf Ebrahimi We can be sure that currNode, as left child of node previously split,
10716*b7893ccfSSadaf Ebrahimi         also fulfills the alignment requirement.
10717*b7893ccfSSadaf Ebrahimi */
10718*b7893ccfSSadaf Ebrahimi }
10719*b7893ccfSSadaf Ebrahimi
10720*b7893ccfSSadaf Ebrahimi // Remove from free list.
10721*b7893ccfSSadaf Ebrahimi VMA_ASSERT(currLevel == targetLevel &&
10722*b7893ccfSSadaf Ebrahimi currNode != VMA_NULL &&
10723*b7893ccfSSadaf Ebrahimi currNode->type == Node::TYPE_FREE);
10724*b7893ccfSSadaf Ebrahimi RemoveFromFreeList(currLevel, currNode);
10725*b7893ccfSSadaf Ebrahimi
10726*b7893ccfSSadaf Ebrahimi // Convert to allocation node.
10727*b7893ccfSSadaf Ebrahimi currNode->type = Node::TYPE_ALLOCATION;
10728*b7893ccfSSadaf Ebrahimi currNode->allocation.alloc = hAllocation;
10729*b7893ccfSSadaf Ebrahimi
10730*b7893ccfSSadaf Ebrahimi ++m_AllocationCount;
10731*b7893ccfSSadaf Ebrahimi --m_FreeCount;
10732*b7893ccfSSadaf Ebrahimi m_SumFreeSize -= allocSize;
10733*b7893ccfSSadaf Ebrahimi }
10734*b7893ccfSSadaf Ebrahimi
10735*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10736*b7893ccfSSadaf Ebrahimi {
10737*b7893ccfSSadaf Ebrahimi if(node->type == Node::TYPE_SPLIT)
10738*b7893ccfSSadaf Ebrahimi {
10739*b7893ccfSSadaf Ebrahimi DeleteNode(node->split.leftChild->buddy);
10740*b7893ccfSSadaf Ebrahimi DeleteNode(node->split.leftChild);
10741*b7893ccfSSadaf Ebrahimi }
10742*b7893ccfSSadaf Ebrahimi
10743*b7893ccfSSadaf Ebrahimi vma_delete(GetAllocationCallbacks(), node);
10744*b7893ccfSSadaf Ebrahimi }
10745*b7893ccfSSadaf Ebrahimi
10746*b7893ccfSSadaf Ebrahimi bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10747*b7893ccfSSadaf Ebrahimi {
10748*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(level < m_LevelCount);
10749*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(curr->parent == parent);
10750*b7893ccfSSadaf Ebrahimi VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10751*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10752*b7893ccfSSadaf Ebrahimi switch(curr->type)
10753*b7893ccfSSadaf Ebrahimi {
10754*b7893ccfSSadaf Ebrahimi case Node::TYPE_FREE:
10755*b7893ccfSSadaf Ebrahimi // curr->free.prev, next are validated separately.
10756*b7893ccfSSadaf Ebrahimi ctx.calculatedSumFreeSize += levelNodeSize;
10757*b7893ccfSSadaf Ebrahimi ++ctx.calculatedFreeCount;
10758*b7893ccfSSadaf Ebrahimi break;
10759*b7893ccfSSadaf Ebrahimi case Node::TYPE_ALLOCATION:
10760*b7893ccfSSadaf Ebrahimi ++ctx.calculatedAllocationCount;
10761*b7893ccfSSadaf Ebrahimi ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10762*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10763*b7893ccfSSadaf Ebrahimi break;
10764*b7893ccfSSadaf Ebrahimi case Node::TYPE_SPLIT:
10765*b7893ccfSSadaf Ebrahimi {
10766*b7893ccfSSadaf Ebrahimi const uint32_t childrenLevel = level + 1;
10767*b7893ccfSSadaf Ebrahimi const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10768*b7893ccfSSadaf Ebrahimi const Node* const leftChild = curr->split.leftChild;
10769*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(leftChild != VMA_NULL);
10770*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(leftChild->offset == curr->offset);
10771*b7893ccfSSadaf Ebrahimi if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10772*b7893ccfSSadaf Ebrahimi {
10773*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(false && "ValidateNode for left child failed.");
10774*b7893ccfSSadaf Ebrahimi }
10775*b7893ccfSSadaf Ebrahimi const Node* const rightChild = leftChild->buddy;
10776*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10777*b7893ccfSSadaf Ebrahimi if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10778*b7893ccfSSadaf Ebrahimi {
10779*b7893ccfSSadaf Ebrahimi VMA_VALIDATE(false && "ValidateNode for right child failed.");
10780*b7893ccfSSadaf Ebrahimi }
10781*b7893ccfSSadaf Ebrahimi }
10782*b7893ccfSSadaf Ebrahimi break;
10783*b7893ccfSSadaf Ebrahimi default:
10784*b7893ccfSSadaf Ebrahimi return false;
10785*b7893ccfSSadaf Ebrahimi }
10786*b7893ccfSSadaf Ebrahimi
10787*b7893ccfSSadaf Ebrahimi return true;
10788*b7893ccfSSadaf Ebrahimi }
10789*b7893ccfSSadaf Ebrahimi
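// Maps an allocation size to the deepest level whose node size is still large enough to hold it.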
10790*b7893ccfSSadaf Ebrahimi uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10791*b7893ccfSSadaf Ebrahimi {
10792*b7893ccfSSadaf Ebrahimi // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10793*b7893ccfSSadaf Ebrahimi uint32_t level = 0;
10794*b7893ccfSSadaf Ebrahimi VkDeviceSize currLevelNodeSize = m_UsableSize;
10795*b7893ccfSSadaf Ebrahimi VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10796*b7893ccfSSadaf Ebrahimi while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10797*b7893ccfSSadaf Ebrahimi {
10798*b7893ccfSSadaf Ebrahimi ++level;
10799*b7893ccfSSadaf Ebrahimi currLevelNodeSize = nextLevelNodeSize;
10800*b7893ccfSSadaf Ebrahimi nextLevelNodeSize = currLevelNodeSize >> 1;
10801*b7893ccfSSadaf Ebrahimi }
10802*b7893ccfSSadaf Ebrahimi return level;
10803*b7893ccfSSadaf Ebrahimi }
10804*b7893ccfSSadaf Ebrahimi
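// Descends from the root following the given offset to the allocated leaf node, marks it
// free, and then merges it with its buddy repeatedly while both halves of a parent are free.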
10805*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10806*b7893ccfSSadaf Ebrahimi {
10807*b7893ccfSSadaf Ebrahimi // Find node and level.
10808*b7893ccfSSadaf Ebrahimi Node* node = m_Root;
10809*b7893ccfSSadaf Ebrahimi VkDeviceSize nodeOffset = 0;
10810*b7893ccfSSadaf Ebrahimi uint32_t level = 0;
10811*b7893ccfSSadaf Ebrahimi VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10812*b7893ccfSSadaf Ebrahimi while(node->type == Node::TYPE_SPLIT)
10813*b7893ccfSSadaf Ebrahimi {
10814*b7893ccfSSadaf Ebrahimi const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10815*b7893ccfSSadaf Ebrahimi if(offset < nodeOffset + nextLevelSize)
10816*b7893ccfSSadaf Ebrahimi {
10817*b7893ccfSSadaf Ebrahimi node = node->split.leftChild;
10818*b7893ccfSSadaf Ebrahimi }
10819*b7893ccfSSadaf Ebrahimi else
10820*b7893ccfSSadaf Ebrahimi {
10821*b7893ccfSSadaf Ebrahimi node = node->split.leftChild->buddy;
10822*b7893ccfSSadaf Ebrahimi nodeOffset += nextLevelSize;
10823*b7893ccfSSadaf Ebrahimi }
10824*b7893ccfSSadaf Ebrahimi ++level;
10825*b7893ccfSSadaf Ebrahimi levelNodeSize = nextLevelSize;
10826*b7893ccfSSadaf Ebrahimi }
10827*b7893ccfSSadaf Ebrahimi
10828*b7893ccfSSadaf Ebrahimi VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10829*b7893ccfSSadaf Ebrahimi VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10830*b7893ccfSSadaf Ebrahimi
10831*b7893ccfSSadaf Ebrahimi ++m_FreeCount;
10832*b7893ccfSSadaf Ebrahimi --m_AllocationCount;
10833*b7893ccfSSadaf Ebrahimi m_SumFreeSize += alloc->GetSize();
10834*b7893ccfSSadaf Ebrahimi
10835*b7893ccfSSadaf Ebrahimi node->type = Node::TYPE_FREE;
10836*b7893ccfSSadaf Ebrahimi
10837*b7893ccfSSadaf Ebrahimi // Join free nodes if possible.
10838*b7893ccfSSadaf Ebrahimi while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10839*b7893ccfSSadaf Ebrahimi {
10840*b7893ccfSSadaf Ebrahimi RemoveFromFreeList(level, node->buddy);
10841*b7893ccfSSadaf Ebrahimi Node* const parent = node->parent;
10842*b7893ccfSSadaf Ebrahimi
10843*b7893ccfSSadaf Ebrahimi vma_delete(GetAllocationCallbacks(), node->buddy);
10844*b7893ccfSSadaf Ebrahimi vma_delete(GetAllocationCallbacks(), node);
10845*b7893ccfSSadaf Ebrahimi parent->type = Node::TYPE_FREE;
10846*b7893ccfSSadaf Ebrahimi
10847*b7893ccfSSadaf Ebrahimi node = parent;
10848*b7893ccfSSadaf Ebrahimi --level;
10849*b7893ccfSSadaf Ebrahimi //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10850*b7893ccfSSadaf Ebrahimi --m_FreeCount;
10851*b7893ccfSSadaf Ebrahimi }
10852*b7893ccfSSadaf Ebrahimi
10853*b7893ccfSSadaf Ebrahimi AddToFreeListFront(level, node);
10854*b7893ccfSSadaf Ebrahimi }
10855*b7893ccfSSadaf Ebrahimi
10856*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10857*b7893ccfSSadaf Ebrahimi {
10858*b7893ccfSSadaf Ebrahimi switch(node->type)
10859*b7893ccfSSadaf Ebrahimi {
10860*b7893ccfSSadaf Ebrahimi case Node::TYPE_FREE:
10861*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
10862*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += levelNodeSize;
10863*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10864*b7893ccfSSadaf Ebrahimi         outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10865*b7893ccfSSadaf Ebrahimi break;
10866*b7893ccfSSadaf Ebrahimi case Node::TYPE_ALLOCATION:
10867*b7893ccfSSadaf Ebrahimi {
10868*b7893ccfSSadaf Ebrahimi const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10869*b7893ccfSSadaf Ebrahimi ++outInfo.allocationCount;
10870*b7893ccfSSadaf Ebrahimi outInfo.usedBytes += allocSize;
10871*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10872*b7893ccfSSadaf Ebrahimi             outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10873*b7893ccfSSadaf Ebrahimi
10874*b7893ccfSSadaf Ebrahimi const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10875*b7893ccfSSadaf Ebrahimi if(unusedRangeSize > 0)
10876*b7893ccfSSadaf Ebrahimi {
10877*b7893ccfSSadaf Ebrahimi ++outInfo.unusedRangeCount;
10878*b7893ccfSSadaf Ebrahimi outInfo.unusedBytes += unusedRangeSize;
10879*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10880*b7893ccfSSadaf Ebrahimi                 outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10881*b7893ccfSSadaf Ebrahimi }
10882*b7893ccfSSadaf Ebrahimi }
10883*b7893ccfSSadaf Ebrahimi break;
10884*b7893ccfSSadaf Ebrahimi case Node::TYPE_SPLIT:
10885*b7893ccfSSadaf Ebrahimi {
10886*b7893ccfSSadaf Ebrahimi const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10887*b7893ccfSSadaf Ebrahimi const Node* const leftChild = node->split.leftChild;
10888*b7893ccfSSadaf Ebrahimi CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10889*b7893ccfSSadaf Ebrahimi const Node* const rightChild = leftChild->buddy;
10890*b7893ccfSSadaf Ebrahimi CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10891*b7893ccfSSadaf Ebrahimi }
10892*b7893ccfSSadaf Ebrahimi break;
10893*b7893ccfSSadaf Ebrahimi default:
10894*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
10895*b7893ccfSSadaf Ebrahimi }
10896*b7893ccfSSadaf Ebrahimi }
10897*b7893ccfSSadaf Ebrahimi
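// Pushes a free node onto the front of the intrusive doubly-linked free list kept per level.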
10898*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10899*b7893ccfSSadaf Ebrahimi {
10900*b7893ccfSSadaf Ebrahimi VMA_ASSERT(node->type == Node::TYPE_FREE);
10901*b7893ccfSSadaf Ebrahimi
10902*b7893ccfSSadaf Ebrahimi // List is empty.
10903*b7893ccfSSadaf Ebrahimi Node* const frontNode = m_FreeList[level].front;
10904*b7893ccfSSadaf Ebrahimi if(frontNode == VMA_NULL)
10905*b7893ccfSSadaf Ebrahimi {
10906*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10907*b7893ccfSSadaf Ebrahimi node->free.prev = node->free.next = VMA_NULL;
10908*b7893ccfSSadaf Ebrahimi m_FreeList[level].front = m_FreeList[level].back = node;
10909*b7893ccfSSadaf Ebrahimi }
10910*b7893ccfSSadaf Ebrahimi else
10911*b7893ccfSSadaf Ebrahimi {
10912*b7893ccfSSadaf Ebrahimi VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10913*b7893ccfSSadaf Ebrahimi node->free.prev = VMA_NULL;
10914*b7893ccfSSadaf Ebrahimi node->free.next = frontNode;
10915*b7893ccfSSadaf Ebrahimi frontNode->free.prev = node;
10916*b7893ccfSSadaf Ebrahimi m_FreeList[level].front = node;
10917*b7893ccfSSadaf Ebrahimi }
10918*b7893ccfSSadaf Ebrahimi }
10919*b7893ccfSSadaf Ebrahimi
10920*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10921*b7893ccfSSadaf Ebrahimi {
10922*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10923*b7893ccfSSadaf Ebrahimi
10924*b7893ccfSSadaf Ebrahimi // It is at the front.
10925*b7893ccfSSadaf Ebrahimi if(node->free.prev == VMA_NULL)
10926*b7893ccfSSadaf Ebrahimi {
10927*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_FreeList[level].front == node);
10928*b7893ccfSSadaf Ebrahimi m_FreeList[level].front = node->free.next;
10929*b7893ccfSSadaf Ebrahimi }
10930*b7893ccfSSadaf Ebrahimi else
10931*b7893ccfSSadaf Ebrahimi {
10932*b7893ccfSSadaf Ebrahimi Node* const prevFreeNode = node->free.prev;
10933*b7893ccfSSadaf Ebrahimi VMA_ASSERT(prevFreeNode->free.next == node);
10934*b7893ccfSSadaf Ebrahimi prevFreeNode->free.next = node->free.next;
10935*b7893ccfSSadaf Ebrahimi }
10936*b7893ccfSSadaf Ebrahimi
10937*b7893ccfSSadaf Ebrahimi // It is at the back.
10938*b7893ccfSSadaf Ebrahimi if(node->free.next == VMA_NULL)
10939*b7893ccfSSadaf Ebrahimi {
10940*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_FreeList[level].back == node);
10941*b7893ccfSSadaf Ebrahimi m_FreeList[level].back = node->free.prev;
10942*b7893ccfSSadaf Ebrahimi }
10943*b7893ccfSSadaf Ebrahimi else
10944*b7893ccfSSadaf Ebrahimi {
10945*b7893ccfSSadaf Ebrahimi Node* const nextFreeNode = node->free.next;
10946*b7893ccfSSadaf Ebrahimi VMA_ASSERT(nextFreeNode->free.prev == node);
10947*b7893ccfSSadaf Ebrahimi nextFreeNode->free.prev = node->free.prev;
10948*b7893ccfSSadaf Ebrahimi }
10949*b7893ccfSSadaf Ebrahimi }
10950*b7893ccfSSadaf Ebrahimi
10951*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
10952*b7893ccfSSadaf Ebrahimi void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10953*b7893ccfSSadaf Ebrahimi {
10954*b7893ccfSSadaf Ebrahimi switch(node->type)
10955*b7893ccfSSadaf Ebrahimi {
10956*b7893ccfSSadaf Ebrahimi case Node::TYPE_FREE:
10957*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10958*b7893ccfSSadaf Ebrahimi break;
10959*b7893ccfSSadaf Ebrahimi case Node::TYPE_ALLOCATION:
10960*b7893ccfSSadaf Ebrahimi {
10961*b7893ccfSSadaf Ebrahimi PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10962*b7893ccfSSadaf Ebrahimi const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10963*b7893ccfSSadaf Ebrahimi if(allocSize < levelNodeSize)
10964*b7893ccfSSadaf Ebrahimi {
10965*b7893ccfSSadaf Ebrahimi PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10966*b7893ccfSSadaf Ebrahimi }
10967*b7893ccfSSadaf Ebrahimi }
10968*b7893ccfSSadaf Ebrahimi break;
10969*b7893ccfSSadaf Ebrahimi case Node::TYPE_SPLIT:
10970*b7893ccfSSadaf Ebrahimi {
10971*b7893ccfSSadaf Ebrahimi const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10972*b7893ccfSSadaf Ebrahimi const Node* const leftChild = node->split.leftChild;
10973*b7893ccfSSadaf Ebrahimi PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10974*b7893ccfSSadaf Ebrahimi const Node* const rightChild = leftChild->buddy;
10975*b7893ccfSSadaf Ebrahimi PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10976*b7893ccfSSadaf Ebrahimi }
10977*b7893ccfSSadaf Ebrahimi break;
10978*b7893ccfSSadaf Ebrahimi default:
10979*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
10980*b7893ccfSSadaf Ebrahimi }
10981*b7893ccfSSadaf Ebrahimi }
10982*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
10983*b7893ccfSSadaf Ebrahimi
10984*b7893ccfSSadaf Ebrahimi
10985*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
10986*b7893ccfSSadaf Ebrahimi // class VmaDeviceMemoryBlock
10987*b7893ccfSSadaf Ebrahimi
10988*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10989*b7893ccfSSadaf Ebrahimi m_pMetadata(VMA_NULL),
10990*b7893ccfSSadaf Ebrahimi m_MemoryTypeIndex(UINT32_MAX),
10991*b7893ccfSSadaf Ebrahimi m_Id(0),
10992*b7893ccfSSadaf Ebrahimi m_hMemory(VK_NULL_HANDLE),
10993*b7893ccfSSadaf Ebrahimi m_MapCount(0),
10994*b7893ccfSSadaf Ebrahimi m_pMappedData(VMA_NULL)
10995*b7893ccfSSadaf Ebrahimi {
10996*b7893ccfSSadaf Ebrahimi }
10997*b7893ccfSSadaf Ebrahimi
10998*b7893ccfSSadaf Ebrahimi void VmaDeviceMemoryBlock::Init(
10999*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
11000*b7893ccfSSadaf Ebrahimi uint32_t newMemoryTypeIndex,
11001*b7893ccfSSadaf Ebrahimi VkDeviceMemory newMemory,
11002*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize,
11003*b7893ccfSSadaf Ebrahimi uint32_t id,
11004*b7893ccfSSadaf Ebrahimi uint32_t algorithm)
11005*b7893ccfSSadaf Ebrahimi {
11006*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11007*b7893ccfSSadaf Ebrahimi
11008*b7893ccfSSadaf Ebrahimi m_MemoryTypeIndex = newMemoryTypeIndex;
11009*b7893ccfSSadaf Ebrahimi m_Id = id;
11010*b7893ccfSSadaf Ebrahimi m_hMemory = newMemory;
11011*b7893ccfSSadaf Ebrahimi
11012*b7893ccfSSadaf Ebrahimi switch(algorithm)
11013*b7893ccfSSadaf Ebrahimi {
11014*b7893ccfSSadaf Ebrahimi case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11015*b7893ccfSSadaf Ebrahimi m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11016*b7893ccfSSadaf Ebrahimi break;
11017*b7893ccfSSadaf Ebrahimi case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11018*b7893ccfSSadaf Ebrahimi m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11019*b7893ccfSSadaf Ebrahimi break;
11020*b7893ccfSSadaf Ebrahimi default:
11021*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
11022*b7893ccfSSadaf Ebrahimi // Fall-through.
11023*b7893ccfSSadaf Ebrahimi case 0:
11024*b7893ccfSSadaf Ebrahimi m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11025*b7893ccfSSadaf Ebrahimi }
11026*b7893ccfSSadaf Ebrahimi m_pMetadata->Init(newSize);
11027*b7893ccfSSadaf Ebrahimi }
11028*b7893ccfSSadaf Ebrahimi
11029*b7893ccfSSadaf Ebrahimi void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11030*b7893ccfSSadaf Ebrahimi {
11031*b7893ccfSSadaf Ebrahimi // This is the most important assert in the entire library.
11032*b7893ccfSSadaf Ebrahimi // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11033*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11034*b7893ccfSSadaf Ebrahimi
11035*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11036*b7893ccfSSadaf Ebrahimi allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11037*b7893ccfSSadaf Ebrahimi m_hMemory = VK_NULL_HANDLE;
11038*b7893ccfSSadaf Ebrahimi
11039*b7893ccfSSadaf Ebrahimi vma_delete(allocator, m_pMetadata);
11040*b7893ccfSSadaf Ebrahimi m_pMetadata = VMA_NULL;
11041*b7893ccfSSadaf Ebrahimi }
11042*b7893ccfSSadaf Ebrahimi
11043*b7893ccfSSadaf Ebrahimi bool VmaDeviceMemoryBlock::Validate() const
11044*b7893ccfSSadaf Ebrahimi {
11045*b7893ccfSSadaf Ebrahimi VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11046*b7893ccfSSadaf Ebrahimi (m_pMetadata->GetSize() != 0));
11047*b7893ccfSSadaf Ebrahimi
11048*b7893ccfSSadaf Ebrahimi return m_pMetadata->Validate();
11049*b7893ccfSSadaf Ebrahimi }
11050*b7893ccfSSadaf Ebrahimi
11051*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11052*b7893ccfSSadaf Ebrahimi {
11053*b7893ccfSSadaf Ebrahimi void* pData = nullptr;
11054*b7893ccfSSadaf Ebrahimi VkResult res = Map(hAllocator, 1, &pData);
11055*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11056*b7893ccfSSadaf Ebrahimi {
11057*b7893ccfSSadaf Ebrahimi return res;
11058*b7893ccfSSadaf Ebrahimi }
11059*b7893ccfSSadaf Ebrahimi
11060*b7893ccfSSadaf Ebrahimi res = m_pMetadata->CheckCorruption(pData);
11061*b7893ccfSSadaf Ebrahimi
11062*b7893ccfSSadaf Ebrahimi Unmap(hAllocator, 1);
11063*b7893ccfSSadaf Ebrahimi
11064*b7893ccfSSadaf Ebrahimi return res;
11065*b7893ccfSSadaf Ebrahimi }
11066*b7893ccfSSadaf Ebrahimi
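// Reference-counted mapping of the whole VkDeviceMemory: vkMapMemory is called only on the
// transition from an unmapped state; later calls just bump the count and return the cached pointer.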
11067*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11068*b7893ccfSSadaf Ebrahimi {
11069*b7893ccfSSadaf Ebrahimi if(count == 0)
11070*b7893ccfSSadaf Ebrahimi {
11071*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11072*b7893ccfSSadaf Ebrahimi }
11073*b7893ccfSSadaf Ebrahimi
11074*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11075*b7893ccfSSadaf Ebrahimi if(m_MapCount != 0)
11076*b7893ccfSSadaf Ebrahimi {
11077*b7893ccfSSadaf Ebrahimi m_MapCount += count;
11078*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_pMappedData != VMA_NULL);
11079*b7893ccfSSadaf Ebrahimi if(ppData != VMA_NULL)
11080*b7893ccfSSadaf Ebrahimi {
11081*b7893ccfSSadaf Ebrahimi *ppData = m_pMappedData;
11082*b7893ccfSSadaf Ebrahimi }
11083*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11084*b7893ccfSSadaf Ebrahimi }
11085*b7893ccfSSadaf Ebrahimi else
11086*b7893ccfSSadaf Ebrahimi {
11087*b7893ccfSSadaf Ebrahimi VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11088*b7893ccfSSadaf Ebrahimi hAllocator->m_hDevice,
11089*b7893ccfSSadaf Ebrahimi m_hMemory,
11090*b7893ccfSSadaf Ebrahimi 0, // offset
11091*b7893ccfSSadaf Ebrahimi VK_WHOLE_SIZE,
11092*b7893ccfSSadaf Ebrahimi 0, // flags
11093*b7893ccfSSadaf Ebrahimi &m_pMappedData);
11094*b7893ccfSSadaf Ebrahimi if(result == VK_SUCCESS)
11095*b7893ccfSSadaf Ebrahimi {
11096*b7893ccfSSadaf Ebrahimi if(ppData != VMA_NULL)
11097*b7893ccfSSadaf Ebrahimi {
11098*b7893ccfSSadaf Ebrahimi *ppData = m_pMappedData;
11099*b7893ccfSSadaf Ebrahimi }
11100*b7893ccfSSadaf Ebrahimi m_MapCount = count;
11101*b7893ccfSSadaf Ebrahimi }
11102*b7893ccfSSadaf Ebrahimi return result;
11103*b7893ccfSSadaf Ebrahimi }
11104*b7893ccfSSadaf Ebrahimi }
11105*b7893ccfSSadaf Ebrahimi
11106*b7893ccfSSadaf Ebrahimi void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11107*b7893ccfSSadaf Ebrahimi {
11108*b7893ccfSSadaf Ebrahimi if(count == 0)
11109*b7893ccfSSadaf Ebrahimi {
11110*b7893ccfSSadaf Ebrahimi return;
11111*b7893ccfSSadaf Ebrahimi }
11112*b7893ccfSSadaf Ebrahimi
11113*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11114*b7893ccfSSadaf Ebrahimi if(m_MapCount >= count)
11115*b7893ccfSSadaf Ebrahimi {
11116*b7893ccfSSadaf Ebrahimi m_MapCount -= count;
11117*b7893ccfSSadaf Ebrahimi if(m_MapCount == 0)
11118*b7893ccfSSadaf Ebrahimi {
11119*b7893ccfSSadaf Ebrahimi m_pMappedData = VMA_NULL;
11120*b7893ccfSSadaf Ebrahimi (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11121*b7893ccfSSadaf Ebrahimi }
11122*b7893ccfSSadaf Ebrahimi }
11123*b7893ccfSSadaf Ebrahimi else
11124*b7893ccfSSadaf Ebrahimi {
11125*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11126*b7893ccfSSadaf Ebrahimi }
11127*b7893ccfSSadaf Ebrahimi }
11128*b7893ccfSSadaf Ebrahimi
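// Writes magic values into the debug margins directly before and after the given allocation
// so that ValidateMagicValueAroundAllocation() can later detect buffer overruns and underruns.
// Only meaningful when VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION are enabled.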
11129*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11130*b7893ccfSSadaf Ebrahimi {
11131*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11132*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11133*b7893ccfSSadaf Ebrahimi
11134*b7893ccfSSadaf Ebrahimi void* pData;
11135*b7893ccfSSadaf Ebrahimi VkResult res = Map(hAllocator, 1, &pData);
11136*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11137*b7893ccfSSadaf Ebrahimi {
11138*b7893ccfSSadaf Ebrahimi return res;
11139*b7893ccfSSadaf Ebrahimi }
11140*b7893ccfSSadaf Ebrahimi
11141*b7893ccfSSadaf Ebrahimi VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11142*b7893ccfSSadaf Ebrahimi VmaWriteMagicValue(pData, allocOffset + allocSize);
11143*b7893ccfSSadaf Ebrahimi
11144*b7893ccfSSadaf Ebrahimi Unmap(hAllocator, 1);
11145*b7893ccfSSadaf Ebrahimi
11146*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11147*b7893ccfSSadaf Ebrahimi }
11148*b7893ccfSSadaf Ebrahimi
11149*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11150*b7893ccfSSadaf Ebrahimi {
11151*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11152*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11153*b7893ccfSSadaf Ebrahimi
11154*b7893ccfSSadaf Ebrahimi void* pData;
11155*b7893ccfSSadaf Ebrahimi VkResult res = Map(hAllocator, 1, &pData);
11156*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11157*b7893ccfSSadaf Ebrahimi {
11158*b7893ccfSSadaf Ebrahimi return res;
11159*b7893ccfSSadaf Ebrahimi }
11160*b7893ccfSSadaf Ebrahimi
11161*b7893ccfSSadaf Ebrahimi if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11162*b7893ccfSSadaf Ebrahimi {
11163*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11164*b7893ccfSSadaf Ebrahimi }
11165*b7893ccfSSadaf Ebrahimi else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11166*b7893ccfSSadaf Ebrahimi {
11167*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11168*b7893ccfSSadaf Ebrahimi }
11169*b7893ccfSSadaf Ebrahimi
11170*b7893ccfSSadaf Ebrahimi Unmap(hAllocator, 1);
11171*b7893ccfSSadaf Ebrahimi
11172*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11173*b7893ccfSSadaf Ebrahimi }
11174*b7893ccfSSadaf Ebrahimi
11175*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11176*b7893ccfSSadaf Ebrahimi const VmaAllocator hAllocator,
11177*b7893ccfSSadaf Ebrahimi const VmaAllocation hAllocation,
11178*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer)
11179*b7893ccfSSadaf Ebrahimi {
11180*b7893ccfSSadaf Ebrahimi VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11181*b7893ccfSSadaf Ebrahimi hAllocation->GetBlock() == this);
11182*b7893ccfSSadaf Ebrahimi // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11183*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11184*b7893ccfSSadaf Ebrahimi return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11185*b7893ccfSSadaf Ebrahimi hAllocator->m_hDevice,
11186*b7893ccfSSadaf Ebrahimi hBuffer,
11187*b7893ccfSSadaf Ebrahimi m_hMemory,
11188*b7893ccfSSadaf Ebrahimi hAllocation->GetOffset());
11189*b7893ccfSSadaf Ebrahimi }
11190*b7893ccfSSadaf Ebrahimi
11191*b7893ccfSSadaf Ebrahimi VkResult VmaDeviceMemoryBlock::BindImageMemory(
11192*b7893ccfSSadaf Ebrahimi const VmaAllocator hAllocator,
11193*b7893ccfSSadaf Ebrahimi const VmaAllocation hAllocation,
11194*b7893ccfSSadaf Ebrahimi VkImage hImage)
11195*b7893ccfSSadaf Ebrahimi {
11196*b7893ccfSSadaf Ebrahimi VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11197*b7893ccfSSadaf Ebrahimi hAllocation->GetBlock() == this);
11198*b7893ccfSSadaf Ebrahimi // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11199*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11200*b7893ccfSSadaf Ebrahimi return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11201*b7893ccfSSadaf Ebrahimi hAllocator->m_hDevice,
11202*b7893ccfSSadaf Ebrahimi hImage,
11203*b7893ccfSSadaf Ebrahimi m_hMemory,
11204*b7893ccfSSadaf Ebrahimi hAllocation->GetOffset());
11205*b7893ccfSSadaf Ebrahimi }
11206*b7893ccfSSadaf Ebrahimi
11207*b7893ccfSSadaf Ebrahimi static void InitStatInfo(VmaStatInfo& outInfo)
11208*b7893ccfSSadaf Ebrahimi {
11209*b7893ccfSSadaf Ebrahimi memset(&outInfo, 0, sizeof(outInfo));
11210*b7893ccfSSadaf Ebrahimi outInfo.allocationSizeMin = UINT64_MAX;
11211*b7893ccfSSadaf Ebrahimi outInfo.unusedRangeSizeMin = UINT64_MAX;
11212*b7893ccfSSadaf Ebrahimi }
11213*b7893ccfSSadaf Ebrahimi
11214*b7893ccfSSadaf Ebrahimi // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11215*b7893ccfSSadaf Ebrahimi static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11216*b7893ccfSSadaf Ebrahimi {
11217*b7893ccfSSadaf Ebrahimi inoutInfo.blockCount += srcInfo.blockCount;
11218*b7893ccfSSadaf Ebrahimi inoutInfo.allocationCount += srcInfo.allocationCount;
11219*b7893ccfSSadaf Ebrahimi inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11220*b7893ccfSSadaf Ebrahimi inoutInfo.usedBytes += srcInfo.usedBytes;
11221*b7893ccfSSadaf Ebrahimi inoutInfo.unusedBytes += srcInfo.unusedBytes;
11222*b7893ccfSSadaf Ebrahimi inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11223*b7893ccfSSadaf Ebrahimi inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11224*b7893ccfSSadaf Ebrahimi inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11225*b7893ccfSSadaf Ebrahimi inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11226*b7893ccfSSadaf Ebrahimi }
11227*b7893ccfSSadaf Ebrahimi
11228*b7893ccfSSadaf Ebrahimi static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11229*b7893ccfSSadaf Ebrahimi {
11230*b7893ccfSSadaf Ebrahimi inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11231*b7893ccfSSadaf Ebrahimi VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11232*b7893ccfSSadaf Ebrahimi inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11233*b7893ccfSSadaf Ebrahimi VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11234*b7893ccfSSadaf Ebrahimi }
11235*b7893ccfSSadaf Ebrahimi
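// A custom pool is a thin wrapper around a single VmaBlockVector. When
// createInfo.blockSize == 0, the allocator's preferred block size is used and
// explicitBlockSize is false, which keeps the progressive block-size heuristic
// in AllocatePage() enabled for this pool.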
11236*b7893ccfSSadaf Ebrahimi VmaPool_T::VmaPool_T(
11237*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
11238*b7893ccfSSadaf Ebrahimi const VmaPoolCreateInfo& createInfo,
11239*b7893ccfSSadaf Ebrahimi VkDeviceSize preferredBlockSize) :
11240*b7893ccfSSadaf Ebrahimi m_BlockVector(
11241*b7893ccfSSadaf Ebrahimi hAllocator,
11242*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeIndex,
11243*b7893ccfSSadaf Ebrahimi createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11244*b7893ccfSSadaf Ebrahimi createInfo.minBlockCount,
11245*b7893ccfSSadaf Ebrahimi createInfo.maxBlockCount,
11246*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11247*b7893ccfSSadaf Ebrahimi createInfo.frameInUseCount,
11248*b7893ccfSSadaf Ebrahimi true, // isCustomPool
11249*b7893ccfSSadaf Ebrahimi createInfo.blockSize != 0, // explicitBlockSize
11250*b7893ccfSSadaf Ebrahimi createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11251*b7893ccfSSadaf Ebrahimi m_Id(0)
11252*b7893ccfSSadaf Ebrahimi {
11253*b7893ccfSSadaf Ebrahimi }
11254*b7893ccfSSadaf Ebrahimi
11255*b7893ccfSSadaf Ebrahimi VmaPool_T::~VmaPool_T()
11256*b7893ccfSSadaf Ebrahimi {
11257*b7893ccfSSadaf Ebrahimi }
11258*b7893ccfSSadaf Ebrahimi
11259*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
11260*b7893ccfSSadaf Ebrahimi
11261*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
11262*b7893ccfSSadaf Ebrahimi
11263*b7893ccfSSadaf Ebrahimi VmaBlockVector::VmaBlockVector(
11264*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
11265*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeIndex,
11266*b7893ccfSSadaf Ebrahimi VkDeviceSize preferredBlockSize,
11267*b7893ccfSSadaf Ebrahimi size_t minBlockCount,
11268*b7893ccfSSadaf Ebrahimi size_t maxBlockCount,
11269*b7893ccfSSadaf Ebrahimi VkDeviceSize bufferImageGranularity,
11270*b7893ccfSSadaf Ebrahimi uint32_t frameInUseCount,
11271*b7893ccfSSadaf Ebrahimi bool isCustomPool,
11272*b7893ccfSSadaf Ebrahimi bool explicitBlockSize,
11273*b7893ccfSSadaf Ebrahimi uint32_t algorithm) :
11274*b7893ccfSSadaf Ebrahimi m_hAllocator(hAllocator),
11275*b7893ccfSSadaf Ebrahimi m_MemoryTypeIndex(memoryTypeIndex),
11276*b7893ccfSSadaf Ebrahimi m_PreferredBlockSize(preferredBlockSize),
11277*b7893ccfSSadaf Ebrahimi m_MinBlockCount(minBlockCount),
11278*b7893ccfSSadaf Ebrahimi m_MaxBlockCount(maxBlockCount),
11279*b7893ccfSSadaf Ebrahimi m_BufferImageGranularity(bufferImageGranularity),
11280*b7893ccfSSadaf Ebrahimi m_FrameInUseCount(frameInUseCount),
11281*b7893ccfSSadaf Ebrahimi m_IsCustomPool(isCustomPool),
11282*b7893ccfSSadaf Ebrahimi m_ExplicitBlockSize(explicitBlockSize),
11283*b7893ccfSSadaf Ebrahimi m_Algorithm(algorithm),
11284*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock(false),
11285*b7893ccfSSadaf Ebrahimi m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11286*b7893ccfSSadaf Ebrahimi m_NextBlockId(0)
11287*b7893ccfSSadaf Ebrahimi {
11288*b7893ccfSSadaf Ebrahimi }
11289*b7893ccfSSadaf Ebrahimi
11290*b7893ccfSSadaf Ebrahimi VmaBlockVector::~VmaBlockVector()
11291*b7893ccfSSadaf Ebrahimi {
11292*b7893ccfSSadaf Ebrahimi for(size_t i = m_Blocks.size(); i--; )
11293*b7893ccfSSadaf Ebrahimi {
11294*b7893ccfSSadaf Ebrahimi m_Blocks[i]->Destroy(m_hAllocator);
11295*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, m_Blocks[i]);
11296*b7893ccfSSadaf Ebrahimi }
11297*b7893ccfSSadaf Ebrahimi }
11298*b7893ccfSSadaf Ebrahimi
11299*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::CreateMinBlocks()
11300*b7893ccfSSadaf Ebrahimi {
11301*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_MinBlockCount; ++i)
11302*b7893ccfSSadaf Ebrahimi {
11303*b7893ccfSSadaf Ebrahimi VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11304*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11305*b7893ccfSSadaf Ebrahimi {
11306*b7893ccfSSadaf Ebrahimi return res;
11307*b7893ccfSSadaf Ebrahimi }
11308*b7893ccfSSadaf Ebrahimi }
11309*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11310*b7893ccfSSadaf Ebrahimi }
11311*b7893ccfSSadaf Ebrahimi
11312*b7893ccfSSadaf Ebrahimi void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11313*b7893ccfSSadaf Ebrahimi {
11314*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11315*b7893ccfSSadaf Ebrahimi
11316*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_Blocks.size();
11317*b7893ccfSSadaf Ebrahimi
11318*b7893ccfSSadaf Ebrahimi pStats->size = 0;
11319*b7893ccfSSadaf Ebrahimi pStats->unusedSize = 0;
11320*b7893ccfSSadaf Ebrahimi pStats->allocationCount = 0;
11321*b7893ccfSSadaf Ebrahimi pStats->unusedRangeCount = 0;
11322*b7893ccfSSadaf Ebrahimi pStats->unusedRangeSizeMax = 0;
11323*b7893ccfSSadaf Ebrahimi pStats->blockCount = blockCount;
11324*b7893ccfSSadaf Ebrahimi
11325*b7893ccfSSadaf Ebrahimi for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11326*b7893ccfSSadaf Ebrahimi {
11327*b7893ccfSSadaf Ebrahimi const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11328*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock);
11329*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pBlock->Validate());
11330*b7893ccfSSadaf Ebrahimi pBlock->m_pMetadata->AddPoolStats(*pStats);
11331*b7893ccfSSadaf Ebrahimi }
11332*b7893ccfSSadaf Ebrahimi }
11333*b7893ccfSSadaf Ebrahimi
11334*b7893ccfSSadaf Ebrahimi bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11335*b7893ccfSSadaf Ebrahimi {
11336*b7893ccfSSadaf Ebrahimi const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11337*b7893ccfSSadaf Ebrahimi return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11338*b7893ccfSSadaf Ebrahimi (VMA_DEBUG_MARGIN > 0) &&
11339*b7893ccfSSadaf Ebrahimi (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11340*b7893ccfSSadaf Ebrahimi }
11341*b7893ccfSSadaf Ebrahimi
11342*b7893ccfSSadaf Ebrahimi static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11343*b7893ccfSSadaf Ebrahimi
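// Allocates allocationCount allocations as one transaction: all pages are allocated
// under a single write lock, and if any page fails, every allocation made so far is
// freed and the output array is zeroed, so the caller gets all-or-nothing semantics.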
11344*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::Allocate(
11345*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
11346*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
11347*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
11348*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
11349*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
11350*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
11351*b7893ccfSSadaf Ebrahimi size_t allocationCount,
11352*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations)
11353*b7893ccfSSadaf Ebrahimi {
11354*b7893ccfSSadaf Ebrahimi size_t allocIndex;
11355*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
11356*b7893ccfSSadaf Ebrahimi
11357*b7893ccfSSadaf Ebrahimi {
11358*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11359*b7893ccfSSadaf Ebrahimi for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11360*b7893ccfSSadaf Ebrahimi {
11361*b7893ccfSSadaf Ebrahimi res = AllocatePage(
11362*b7893ccfSSadaf Ebrahimi hCurrentPool,
11363*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11364*b7893ccfSSadaf Ebrahimi size,
11365*b7893ccfSSadaf Ebrahimi alignment,
11366*b7893ccfSSadaf Ebrahimi createInfo,
11367*b7893ccfSSadaf Ebrahimi suballocType,
11368*b7893ccfSSadaf Ebrahimi pAllocations + allocIndex);
11369*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11370*b7893ccfSSadaf Ebrahimi {
11371*b7893ccfSSadaf Ebrahimi break;
11372*b7893ccfSSadaf Ebrahimi }
11373*b7893ccfSSadaf Ebrahimi }
11374*b7893ccfSSadaf Ebrahimi }
11375*b7893ccfSSadaf Ebrahimi
11376*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11377*b7893ccfSSadaf Ebrahimi {
11378*b7893ccfSSadaf Ebrahimi // Free all already created allocations.
11379*b7893ccfSSadaf Ebrahimi while(allocIndex--)
11380*b7893ccfSSadaf Ebrahimi {
11381*b7893ccfSSadaf Ebrahimi Free(pAllocations[allocIndex]);
11382*b7893ccfSSadaf Ebrahimi }
11383*b7893ccfSSadaf Ebrahimi memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11384*b7893ccfSSadaf Ebrahimi }
11385*b7893ccfSSadaf Ebrahimi
11386*b7893ccfSSadaf Ebrahimi return res;
11387*b7893ccfSSadaf Ebrahimi }
11388*b7893ccfSSadaf Ebrahimi
11389*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::AllocatePage(
11390*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
11391*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
11392*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
11393*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
11394*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
11395*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
11396*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation)
11397*b7893ccfSSadaf Ebrahimi {
11398*b7893ccfSSadaf Ebrahimi const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11399*b7893ccfSSadaf Ebrahimi bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11400*b7893ccfSSadaf Ebrahimi const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11401*b7893ccfSSadaf Ebrahimi const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11402*b7893ccfSSadaf Ebrahimi const bool canCreateNewBlock =
11403*b7893ccfSSadaf Ebrahimi ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11404*b7893ccfSSadaf Ebrahimi (m_Blocks.size() < m_MaxBlockCount);
11405*b7893ccfSSadaf Ebrahimi uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11406*b7893ccfSSadaf Ebrahimi
11407*b7893ccfSSadaf Ebrahimi // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11408*b7893ccfSSadaf Ebrahimi // which in turn is available only when maxBlockCount = 1.
11409*b7893ccfSSadaf Ebrahimi if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11410*b7893ccfSSadaf Ebrahimi {
11411*b7893ccfSSadaf Ebrahimi canMakeOtherLost = false;
11412*b7893ccfSSadaf Ebrahimi }
11413*b7893ccfSSadaf Ebrahimi
11414*b7893ccfSSadaf Ebrahimi // Upper address can only be used with the linear allocator and within a single memory block.
11415*b7893ccfSSadaf Ebrahimi if(isUpperAddress &&
11416*b7893ccfSSadaf Ebrahimi (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11417*b7893ccfSSadaf Ebrahimi {
11418*b7893ccfSSadaf Ebrahimi return VK_ERROR_FEATURE_NOT_PRESENT;
11419*b7893ccfSSadaf Ebrahimi }
11420*b7893ccfSSadaf Ebrahimi
11421*b7893ccfSSadaf Ebrahimi // Validate strategy.
11422*b7893ccfSSadaf Ebrahimi switch(strategy)
11423*b7893ccfSSadaf Ebrahimi {
11424*b7893ccfSSadaf Ebrahimi case 0:
11425*b7893ccfSSadaf Ebrahimi strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11426*b7893ccfSSadaf Ebrahimi break;
11427*b7893ccfSSadaf Ebrahimi case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11428*b7893ccfSSadaf Ebrahimi case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11429*b7893ccfSSadaf Ebrahimi case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11430*b7893ccfSSadaf Ebrahimi break;
11431*b7893ccfSSadaf Ebrahimi default:
11432*b7893ccfSSadaf Ebrahimi return VK_ERROR_FEATURE_NOT_PRESENT;
11433*b7893ccfSSadaf Ebrahimi }
11434*b7893ccfSSadaf Ebrahimi
11435*b7893ccfSSadaf Ebrahimi // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11436*b7893ccfSSadaf Ebrahimi if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11437*b7893ccfSSadaf Ebrahimi {
11438*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11439*b7893ccfSSadaf Ebrahimi }
11440*b7893ccfSSadaf Ebrahimi
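// The allocation proceeds in up to three stages, mirroring the numbered comments below:
// 1. try to place the request in an existing block, 2. create a new block if allowed,
// 3. as a last resort, make other (lost-able) allocations lost to free up space.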
11441*b7893ccfSSadaf Ebrahimi /*
11442*b7893ccfSSadaf Ebrahimi Under certain conditions, this whole section can be skipped for optimization, so
11443*b7893ccfSSadaf Ebrahimi we move on directly to trying to allocate with canMakeOtherLost. That's the case
11444*b7893ccfSSadaf Ebrahimi e.g. for custom pools with the linear algorithm.
11445*b7893ccfSSadaf Ebrahimi */
11446*b7893ccfSSadaf Ebrahimi if(!canMakeOtherLost || canCreateNewBlock)
11447*b7893ccfSSadaf Ebrahimi {
11448*b7893ccfSSadaf Ebrahimi // 1. Search existing allocations. Try to allocate without making other allocations lost.
11449*b7893ccfSSadaf Ebrahimi VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11450*b7893ccfSSadaf Ebrahimi allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11451*b7893ccfSSadaf Ebrahimi
11452*b7893ccfSSadaf Ebrahimi if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11453*b7893ccfSSadaf Ebrahimi {
11454*b7893ccfSSadaf Ebrahimi // Use only last block.
11455*b7893ccfSSadaf Ebrahimi if(!m_Blocks.empty())
11456*b7893ccfSSadaf Ebrahimi {
11457*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11458*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCurrBlock);
11459*b7893ccfSSadaf Ebrahimi VkResult res = AllocateFromBlock(
11460*b7893ccfSSadaf Ebrahimi pCurrBlock,
11461*b7893ccfSSadaf Ebrahimi hCurrentPool,
11462*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11463*b7893ccfSSadaf Ebrahimi size,
11464*b7893ccfSSadaf Ebrahimi alignment,
11465*b7893ccfSSadaf Ebrahimi allocFlagsCopy,
11466*b7893ccfSSadaf Ebrahimi createInfo.pUserData,
11467*b7893ccfSSadaf Ebrahimi suballocType,
11468*b7893ccfSSadaf Ebrahimi strategy,
11469*b7893ccfSSadaf Ebrahimi pAllocation);
11470*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
11471*b7893ccfSSadaf Ebrahimi {
11472*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11473*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11474*b7893ccfSSadaf Ebrahimi }
11475*b7893ccfSSadaf Ebrahimi }
11476*b7893ccfSSadaf Ebrahimi }
11477*b7893ccfSSadaf Ebrahimi else
11478*b7893ccfSSadaf Ebrahimi {
11479*b7893ccfSSadaf Ebrahimi if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11480*b7893ccfSSadaf Ebrahimi {
11481*b7893ccfSSadaf Ebrahimi // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11482*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11483*b7893ccfSSadaf Ebrahimi {
11484*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11485*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCurrBlock);
11486*b7893ccfSSadaf Ebrahimi VkResult res = AllocateFromBlock(
11487*b7893ccfSSadaf Ebrahimi pCurrBlock,
11488*b7893ccfSSadaf Ebrahimi hCurrentPool,
11489*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11490*b7893ccfSSadaf Ebrahimi size,
11491*b7893ccfSSadaf Ebrahimi alignment,
11492*b7893ccfSSadaf Ebrahimi allocFlagsCopy,
11493*b7893ccfSSadaf Ebrahimi createInfo.pUserData,
11494*b7893ccfSSadaf Ebrahimi suballocType,
11495*b7893ccfSSadaf Ebrahimi strategy,
11496*b7893ccfSSadaf Ebrahimi pAllocation);
11497*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
11498*b7893ccfSSadaf Ebrahimi {
11499*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11500*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11501*b7893ccfSSadaf Ebrahimi }
11502*b7893ccfSSadaf Ebrahimi }
11503*b7893ccfSSadaf Ebrahimi }
11504*b7893ccfSSadaf Ebrahimi else // WORST_FIT, FIRST_FIT
11505*b7893ccfSSadaf Ebrahimi {
11506*b7893ccfSSadaf Ebrahimi // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11507*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11508*b7893ccfSSadaf Ebrahimi {
11509*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11510*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCurrBlock);
11511*b7893ccfSSadaf Ebrahimi VkResult res = AllocateFromBlock(
11512*b7893ccfSSadaf Ebrahimi pCurrBlock,
11513*b7893ccfSSadaf Ebrahimi hCurrentPool,
11514*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11515*b7893ccfSSadaf Ebrahimi size,
11516*b7893ccfSSadaf Ebrahimi alignment,
11517*b7893ccfSSadaf Ebrahimi allocFlagsCopy,
11518*b7893ccfSSadaf Ebrahimi createInfo.pUserData,
11519*b7893ccfSSadaf Ebrahimi suballocType,
11520*b7893ccfSSadaf Ebrahimi strategy,
11521*b7893ccfSSadaf Ebrahimi pAllocation);
11522*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
11523*b7893ccfSSadaf Ebrahimi {
11524*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11525*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11526*b7893ccfSSadaf Ebrahimi }
11527*b7893ccfSSadaf Ebrahimi }
11528*b7893ccfSSadaf Ebrahimi }
11529*b7893ccfSSadaf Ebrahimi }
11530*b7893ccfSSadaf Ebrahimi
11531*b7893ccfSSadaf Ebrahimi // 2. Try to create new block.
11532*b7893ccfSSadaf Ebrahimi if(canCreateNewBlock)
11533*b7893ccfSSadaf Ebrahimi {
11534*b7893ccfSSadaf Ebrahimi // Calculate optimal size for new block.
11535*b7893ccfSSadaf Ebrahimi VkDeviceSize newBlockSize = m_PreferredBlockSize;
11536*b7893ccfSSadaf Ebrahimi uint32_t newBlockSizeShift = 0;
11537*b7893ccfSSadaf Ebrahimi const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11538*b7893ccfSSadaf Ebrahimi
11539*b7893ccfSSadaf Ebrahimi if(!m_ExplicitBlockSize)
11540*b7893ccfSSadaf Ebrahimi {
11541*b7893ccfSSadaf Ebrahimi // Allocate 1/8, 1/4, 1/2 as first blocks.
11542*b7893ccfSSadaf Ebrahimi const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11543*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11544*b7893ccfSSadaf Ebrahimi {
11545*b7893ccfSSadaf Ebrahimi const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11546*b7893ccfSSadaf Ebrahimi if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11547*b7893ccfSSadaf Ebrahimi {
11548*b7893ccfSSadaf Ebrahimi newBlockSize = smallerNewBlockSize;
11549*b7893ccfSSadaf Ebrahimi ++newBlockSizeShift;
11550*b7893ccfSSadaf Ebrahimi }
11551*b7893ccfSSadaf Ebrahimi else
11552*b7893ccfSSadaf Ebrahimi {
11553*b7893ccfSSadaf Ebrahimi break;
11554*b7893ccfSSadaf Ebrahimi }
11555*b7893ccfSSadaf Ebrahimi }
11556*b7893ccfSSadaf Ebrahimi }
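// Example (assuming the default 256 MiB preferred block size and small requests):
// with no existing blocks the loop halves the size three times, so the first block
// is 32 MiB; subsequent blocks then grow to 64 MiB, 128 MiB, and finally full size.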
11557*b7893ccfSSadaf Ebrahimi
11558*b7893ccfSSadaf Ebrahimi size_t newBlockIndex = 0;
11559*b7893ccfSSadaf Ebrahimi VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11560*b7893ccfSSadaf Ebrahimi // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11561*b7893ccfSSadaf Ebrahimi if(!m_ExplicitBlockSize)
11562*b7893ccfSSadaf Ebrahimi {
11563*b7893ccfSSadaf Ebrahimi while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11564*b7893ccfSSadaf Ebrahimi {
11565*b7893ccfSSadaf Ebrahimi const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11566*b7893ccfSSadaf Ebrahimi if(smallerNewBlockSize >= size)
11567*b7893ccfSSadaf Ebrahimi {
11568*b7893ccfSSadaf Ebrahimi newBlockSize = smallerNewBlockSize;
11569*b7893ccfSSadaf Ebrahimi ++newBlockSizeShift;
11570*b7893ccfSSadaf Ebrahimi res = CreateBlock(newBlockSize, &newBlockIndex);
11571*b7893ccfSSadaf Ebrahimi }
11572*b7893ccfSSadaf Ebrahimi else
11573*b7893ccfSSadaf Ebrahimi {
11574*b7893ccfSSadaf Ebrahimi break;
11575*b7893ccfSSadaf Ebrahimi }
11576*b7893ccfSSadaf Ebrahimi }
11577*b7893ccfSSadaf Ebrahimi }
11578*b7893ccfSSadaf Ebrahimi
11579*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
11580*b7893ccfSSadaf Ebrahimi {
11581*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11582*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11583*b7893ccfSSadaf Ebrahimi
11584*b7893ccfSSadaf Ebrahimi res = AllocateFromBlock(
11585*b7893ccfSSadaf Ebrahimi pBlock,
11586*b7893ccfSSadaf Ebrahimi hCurrentPool,
11587*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11588*b7893ccfSSadaf Ebrahimi size,
11589*b7893ccfSSadaf Ebrahimi alignment,
11590*b7893ccfSSadaf Ebrahimi allocFlagsCopy,
11591*b7893ccfSSadaf Ebrahimi createInfo.pUserData,
11592*b7893ccfSSadaf Ebrahimi suballocType,
11593*b7893ccfSSadaf Ebrahimi strategy,
11594*b7893ccfSSadaf Ebrahimi pAllocation);
11595*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
11596*b7893ccfSSadaf Ebrahimi {
11597*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11598*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11599*b7893ccfSSadaf Ebrahimi }
11600*b7893ccfSSadaf Ebrahimi else
11601*b7893ccfSSadaf Ebrahimi {
11602*b7893ccfSSadaf Ebrahimi // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11603*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11604*b7893ccfSSadaf Ebrahimi }
11605*b7893ccfSSadaf Ebrahimi }
11606*b7893ccfSSadaf Ebrahimi }
11607*b7893ccfSSadaf Ebrahimi }
11608*b7893ccfSSadaf Ebrahimi
11609*b7893ccfSSadaf Ebrahimi // 3. Try to allocate from existing blocks with making other allocations lost.
11610*b7893ccfSSadaf Ebrahimi if(canMakeOtherLost)
11611*b7893ccfSSadaf Ebrahimi {
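        // Each try scans the blocks for the cheapest allocation request (cost = bytes of
        // other allocations that would have to be made lost), then attempts to actually
        // make those allocations lost. If another thread touched them in the meantime,
        // the attempt fails and we retry, up to VMA_ALLOCATION_TRY_COUNT times.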
11612*b7893ccfSSadaf Ebrahimi uint32_t tryIndex = 0;
11613*b7893ccfSSadaf Ebrahimi for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11614*b7893ccfSSadaf Ebrahimi {
11615*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11616*b7893ccfSSadaf Ebrahimi VmaAllocationRequest bestRequest = {};
11617*b7893ccfSSadaf Ebrahimi VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11618*b7893ccfSSadaf Ebrahimi
11619*b7893ccfSSadaf Ebrahimi // 1. Search existing allocations.
11620*b7893ccfSSadaf Ebrahimi if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11621*b7893ccfSSadaf Ebrahimi {
11622*b7893ccfSSadaf Ebrahimi // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11623*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11624*b7893ccfSSadaf Ebrahimi {
11625*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11626*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCurrBlock);
11627*b7893ccfSSadaf Ebrahimi VmaAllocationRequest currRequest = {};
11628*b7893ccfSSadaf Ebrahimi if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11629*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11630*b7893ccfSSadaf Ebrahimi m_FrameInUseCount,
11631*b7893ccfSSadaf Ebrahimi m_BufferImageGranularity,
11632*b7893ccfSSadaf Ebrahimi size,
11633*b7893ccfSSadaf Ebrahimi alignment,
11634*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11635*b7893ccfSSadaf Ebrahimi suballocType,
11636*b7893ccfSSadaf Ebrahimi canMakeOtherLost,
11637*b7893ccfSSadaf Ebrahimi strategy,
11638*b7893ccfSSadaf Ebrahimi &currRequest))
11639*b7893ccfSSadaf Ebrahimi {
11640*b7893ccfSSadaf Ebrahimi const VkDeviceSize currRequestCost = currRequest.CalcCost();
11641*b7893ccfSSadaf Ebrahimi if(pBestRequestBlock == VMA_NULL ||
11642*b7893ccfSSadaf Ebrahimi currRequestCost < bestRequestCost)
11643*b7893ccfSSadaf Ebrahimi {
11644*b7893ccfSSadaf Ebrahimi pBestRequestBlock = pCurrBlock;
11645*b7893ccfSSadaf Ebrahimi bestRequest = currRequest;
11646*b7893ccfSSadaf Ebrahimi bestRequestCost = currRequestCost;
11647*b7893ccfSSadaf Ebrahimi
11648*b7893ccfSSadaf Ebrahimi if(bestRequestCost == 0)
11649*b7893ccfSSadaf Ebrahimi {
11650*b7893ccfSSadaf Ebrahimi break;
11651*b7893ccfSSadaf Ebrahimi }
11652*b7893ccfSSadaf Ebrahimi }
11653*b7893ccfSSadaf Ebrahimi }
11654*b7893ccfSSadaf Ebrahimi }
11655*b7893ccfSSadaf Ebrahimi }
11656*b7893ccfSSadaf Ebrahimi else // WORST_FIT, FIRST_FIT
11657*b7893ccfSSadaf Ebrahimi {
11658*b7893ccfSSadaf Ebrahimi // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11659*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11660*b7893ccfSSadaf Ebrahimi {
11661*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11662*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCurrBlock);
11663*b7893ccfSSadaf Ebrahimi VmaAllocationRequest currRequest = {};
11664*b7893ccfSSadaf Ebrahimi if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11665*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11666*b7893ccfSSadaf Ebrahimi m_FrameInUseCount,
11667*b7893ccfSSadaf Ebrahimi m_BufferImageGranularity,
11668*b7893ccfSSadaf Ebrahimi size,
11669*b7893ccfSSadaf Ebrahimi alignment,
11670*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11671*b7893ccfSSadaf Ebrahimi suballocType,
11672*b7893ccfSSadaf Ebrahimi canMakeOtherLost,
11673*b7893ccfSSadaf Ebrahimi strategy,
11674*b7893ccfSSadaf Ebrahimi &currRequest))
11675*b7893ccfSSadaf Ebrahimi {
11676*b7893ccfSSadaf Ebrahimi const VkDeviceSize currRequestCost = currRequest.CalcCost();
11677*b7893ccfSSadaf Ebrahimi if(pBestRequestBlock == VMA_NULL ||
11678*b7893ccfSSadaf Ebrahimi currRequestCost < bestRequestCost ||
11679*b7893ccfSSadaf Ebrahimi strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11680*b7893ccfSSadaf Ebrahimi {
11681*b7893ccfSSadaf Ebrahimi pBestRequestBlock = pCurrBlock;
11682*b7893ccfSSadaf Ebrahimi bestRequest = currRequest;
11683*b7893ccfSSadaf Ebrahimi bestRequestCost = currRequestCost;
11684*b7893ccfSSadaf Ebrahimi
11685*b7893ccfSSadaf Ebrahimi if(bestRequestCost == 0 ||
11686*b7893ccfSSadaf Ebrahimi strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11687*b7893ccfSSadaf Ebrahimi {
11688*b7893ccfSSadaf Ebrahimi break;
11689*b7893ccfSSadaf Ebrahimi }
11690*b7893ccfSSadaf Ebrahimi }
11691*b7893ccfSSadaf Ebrahimi }
11692*b7893ccfSSadaf Ebrahimi }
11693*b7893ccfSSadaf Ebrahimi }
11694*b7893ccfSSadaf Ebrahimi
11695*b7893ccfSSadaf Ebrahimi if(pBestRequestBlock != VMA_NULL)
11696*b7893ccfSSadaf Ebrahimi {
11697*b7893ccfSSadaf Ebrahimi if(mapped)
11698*b7893ccfSSadaf Ebrahimi {
11699*b7893ccfSSadaf Ebrahimi VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11700*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11701*b7893ccfSSadaf Ebrahimi {
11702*b7893ccfSSadaf Ebrahimi return res;
11703*b7893ccfSSadaf Ebrahimi }
11704*b7893ccfSSadaf Ebrahimi }
11705*b7893ccfSSadaf Ebrahimi
11706*b7893ccfSSadaf Ebrahimi if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11707*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11708*b7893ccfSSadaf Ebrahimi m_FrameInUseCount,
11709*b7893ccfSSadaf Ebrahimi &bestRequest))
11710*b7893ccfSSadaf Ebrahimi {
11711*b7893ccfSSadaf Ebrahimi // We no longer have an empty block.
11712*b7893ccfSSadaf Ebrahimi if(pBestRequestBlock->m_pMetadata->IsEmpty())
11713*b7893ccfSSadaf Ebrahimi {
11714*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = false;
11715*b7893ccfSSadaf Ebrahimi }
11716*b7893ccfSSadaf Ebrahimi // Allocate from this pBlock.
11717*b7893ccfSSadaf Ebrahimi *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11718*b7893ccfSSadaf Ebrahimi pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11719*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitBlockAllocation(
11720*b7893ccfSSadaf Ebrahimi hCurrentPool,
11721*b7893ccfSSadaf Ebrahimi pBestRequestBlock,
11722*b7893ccfSSadaf Ebrahimi bestRequest.offset,
11723*b7893ccfSSadaf Ebrahimi alignment,
11724*b7893ccfSSadaf Ebrahimi size,
11725*b7893ccfSSadaf Ebrahimi suballocType,
11726*b7893ccfSSadaf Ebrahimi mapped,
11727*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11728*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11729*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Returned from existing block");
11730*b7893ccfSSadaf Ebrahimi (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11731*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11732*b7893ccfSSadaf Ebrahimi {
11733*b7893ccfSSadaf Ebrahimi m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11734*b7893ccfSSadaf Ebrahimi }
11735*b7893ccfSSadaf Ebrahimi if(IsCorruptionDetectionEnabled())
11736*b7893ccfSSadaf Ebrahimi {
11737*b7893ccfSSadaf Ebrahimi VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11738*b7893ccfSSadaf Ebrahimi VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11739*b7893ccfSSadaf Ebrahimi }
11740*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11741*b7893ccfSSadaf Ebrahimi }
11742*b7893ccfSSadaf Ebrahimi // else: Some allocations must have been touched while we were here. Next try.
11743*b7893ccfSSadaf Ebrahimi }
11744*b7893ccfSSadaf Ebrahimi else
11745*b7893ccfSSadaf Ebrahimi {
11746*b7893ccfSSadaf Ebrahimi // Could not find place in any of the blocks - break outer loop.
11747*b7893ccfSSadaf Ebrahimi break;
11748*b7893ccfSSadaf Ebrahimi }
11749*b7893ccfSSadaf Ebrahimi }
11750*b7893ccfSSadaf Ebrahimi /* Maximum number of tries exceeded - a very unlikely event when many other
11751*b7893ccfSSadaf Ebrahimi threads are simultaneously touching allocations, making it impossible to make
11752*b7893ccfSSadaf Ebrahimi them lost at the same time as we try to allocate. */
11753*b7893ccfSSadaf Ebrahimi if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11754*b7893ccfSSadaf Ebrahimi {
11755*b7893ccfSSadaf Ebrahimi return VK_ERROR_TOO_MANY_OBJECTS;
11756*b7893ccfSSadaf Ebrahimi }
11757*b7893ccfSSadaf Ebrahimi }
11758*b7893ccfSSadaf Ebrahimi
11759*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11760*b7893ccfSSadaf Ebrahimi }
11761*b7893ccfSSadaf Ebrahimi
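// Frees a single allocation: under the write lock it validates corruption margins
// if enabled, unmaps a persistent mapping, releases the suballocation in the block
// metadata, and keeps at most one empty block around (extra empty blocks above
// m_MinBlockCount are scheduled for deletion). The actual VkDeviceMemory
// destruction happens after the lock is released.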
11762*b7893ccfSSadaf Ebrahimi void VmaBlockVector::Free(
11763*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation)
11764*b7893ccfSSadaf Ebrahimi {
11765*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11766*b7893ccfSSadaf Ebrahimi
11767*b7893ccfSSadaf Ebrahimi // Scope for lock.
11768*b7893ccfSSadaf Ebrahimi {
11769*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11770*b7893ccfSSadaf Ebrahimi
11771*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11772*b7893ccfSSadaf Ebrahimi
11773*b7893ccfSSadaf Ebrahimi if(IsCorruptionDetectionEnabled())
11774*b7893ccfSSadaf Ebrahimi {
11775*b7893ccfSSadaf Ebrahimi VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11776*b7893ccfSSadaf Ebrahimi VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11777*b7893ccfSSadaf Ebrahimi }
11778*b7893ccfSSadaf Ebrahimi
11779*b7893ccfSSadaf Ebrahimi if(hAllocation->IsPersistentMap())
11780*b7893ccfSSadaf Ebrahimi {
11781*b7893ccfSSadaf Ebrahimi pBlock->Unmap(m_hAllocator, 1);
11782*b7893ccfSSadaf Ebrahimi }
11783*b7893ccfSSadaf Ebrahimi
11784*b7893ccfSSadaf Ebrahimi pBlock->m_pMetadata->Free(hAllocation);
11785*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pBlock->Validate());
11786*b7893ccfSSadaf Ebrahimi
11787*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11788*b7893ccfSSadaf Ebrahimi
11789*b7893ccfSSadaf Ebrahimi // pBlock became empty after this deallocation.
11790*b7893ccfSSadaf Ebrahimi if(pBlock->m_pMetadata->IsEmpty())
11791*b7893ccfSSadaf Ebrahimi {
11792*b7893ccfSSadaf Ebrahimi // We already have an empty block. We don't want two, so delete this one.
11793*b7893ccfSSadaf Ebrahimi if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11794*b7893ccfSSadaf Ebrahimi {
11795*b7893ccfSSadaf Ebrahimi pBlockToDelete = pBlock;
11796*b7893ccfSSadaf Ebrahimi Remove(pBlock);
11797*b7893ccfSSadaf Ebrahimi }
11798*b7893ccfSSadaf Ebrahimi // We now have first empty block.
11799*b7893ccfSSadaf Ebrahimi else
11800*b7893ccfSSadaf Ebrahimi {
11801*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = true;
11802*b7893ccfSSadaf Ebrahimi }
11803*b7893ccfSSadaf Ebrahimi }
11804*b7893ccfSSadaf Ebrahimi // pBlock didn't become empty, but we have another empty block - find and free that one.
11805*b7893ccfSSadaf Ebrahimi // (This is an optional heuristic.)
11806*b7893ccfSSadaf Ebrahimi else if(m_HasEmptyBlock)
11807*b7893ccfSSadaf Ebrahimi {
11808*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11809*b7893ccfSSadaf Ebrahimi if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11810*b7893ccfSSadaf Ebrahimi {
11811*b7893ccfSSadaf Ebrahimi pBlockToDelete = pLastBlock;
11812*b7893ccfSSadaf Ebrahimi m_Blocks.pop_back();
11813*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = false;
11814*b7893ccfSSadaf Ebrahimi }
11815*b7893ccfSSadaf Ebrahimi }
11816*b7893ccfSSadaf Ebrahimi
11817*b7893ccfSSadaf Ebrahimi IncrementallySortBlocks();
11818*b7893ccfSSadaf Ebrahimi }
11819*b7893ccfSSadaf Ebrahimi
11820*b7893ccfSSadaf Ebrahimi // Destruction of a free block. Deferred until this point, outside of the mutex
11821*b7893ccfSSadaf Ebrahimi // lock, for performance reasons.
11822*b7893ccfSSadaf Ebrahimi if(pBlockToDelete != VMA_NULL)
11823*b7893ccfSSadaf Ebrahimi {
11824*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Deleted empty allocation");
11825*b7893ccfSSadaf Ebrahimi pBlockToDelete->Destroy(m_hAllocator);
11826*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, pBlockToDelete);
11827*b7893ccfSSadaf Ebrahimi }
11828*b7893ccfSSadaf Ebrahimi }
11829*b7893ccfSSadaf Ebrahimi
11830*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11831*b7893ccfSSadaf Ebrahimi {
11832*b7893ccfSSadaf Ebrahimi VkDeviceSize result = 0;
11833*b7893ccfSSadaf Ebrahimi for(size_t i = m_Blocks.size(); i--; )
11834*b7893ccfSSadaf Ebrahimi {
11835*b7893ccfSSadaf Ebrahimi result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11836*b7893ccfSSadaf Ebrahimi if(result >= m_PreferredBlockSize)
11837*b7893ccfSSadaf Ebrahimi {
11838*b7893ccfSSadaf Ebrahimi break;
11839*b7893ccfSSadaf Ebrahimi }
11840*b7893ccfSSadaf Ebrahimi }
11841*b7893ccfSSadaf Ebrahimi return result;
11842*b7893ccfSSadaf Ebrahimi }
11843*b7893ccfSSadaf Ebrahimi
11844*b7893ccfSSadaf Ebrahimi void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11845*b7893ccfSSadaf Ebrahimi {
11846*b7893ccfSSadaf Ebrahimi for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11847*b7893ccfSSadaf Ebrahimi {
11848*b7893ccfSSadaf Ebrahimi if(m_Blocks[blockIndex] == pBlock)
11849*b7893ccfSSadaf Ebrahimi {
11850*b7893ccfSSadaf Ebrahimi VmaVectorRemove(m_Blocks, blockIndex);
11851*b7893ccfSSadaf Ebrahimi return;
11852*b7893ccfSSadaf Ebrahimi }
11853*b7893ccfSSadaf Ebrahimi }
11854*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
11855*b7893ccfSSadaf Ebrahimi }
11856*b7893ccfSSadaf Ebrahimi
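// Performs at most one swap per call ("bubble sort only until first swap"), so over
// repeated allocations and frees m_Blocks tends toward ascending order of free space,
// which the forward (best-fit) search in AllocatePage() relies on. Skipped for the
// linear algorithm, which always allocates from the last block.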
11857*b7893ccfSSadaf Ebrahimi void VmaBlockVector::IncrementallySortBlocks()
11858*b7893ccfSSadaf Ebrahimi {
11859*b7893ccfSSadaf Ebrahimi if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11860*b7893ccfSSadaf Ebrahimi {
11861*b7893ccfSSadaf Ebrahimi // Bubble sort only until first swap.
11862*b7893ccfSSadaf Ebrahimi for(size_t i = 1; i < m_Blocks.size(); ++i)
11863*b7893ccfSSadaf Ebrahimi {
11864*b7893ccfSSadaf Ebrahimi if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11865*b7893ccfSSadaf Ebrahimi {
11866*b7893ccfSSadaf Ebrahimi VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11867*b7893ccfSSadaf Ebrahimi return;
11868*b7893ccfSSadaf Ebrahimi }
11869*b7893ccfSSadaf Ebrahimi }
11870*b7893ccfSSadaf Ebrahimi }
11871*b7893ccfSSadaf Ebrahimi }
11872*b7893ccfSSadaf Ebrahimi
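// Fast path shared by all branches of AllocatePage() that do not make other
// allocations lost: asks the block's metadata for an allocation request, maps the
// block if a persistently mapped allocation was requested, then records the
// suballocation and initializes the returned VmaAllocation handle.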
11873*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::AllocateFromBlock(
11874*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock,
11875*b7893ccfSSadaf Ebrahimi VmaPool hCurrentPool,
11876*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
11877*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
11878*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
11879*b7893ccfSSadaf Ebrahimi VmaAllocationCreateFlags allocFlags,
11880*b7893ccfSSadaf Ebrahimi void* pUserData,
11881*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
11882*b7893ccfSSadaf Ebrahimi uint32_t strategy,
11883*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation)
11884*b7893ccfSSadaf Ebrahimi {
11885*b7893ccfSSadaf Ebrahimi VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11886*b7893ccfSSadaf Ebrahimi const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11887*b7893ccfSSadaf Ebrahimi const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11888*b7893ccfSSadaf Ebrahimi const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11889*b7893ccfSSadaf Ebrahimi
11890*b7893ccfSSadaf Ebrahimi VmaAllocationRequest currRequest = {};
11891*b7893ccfSSadaf Ebrahimi if(pBlock->m_pMetadata->CreateAllocationRequest(
11892*b7893ccfSSadaf Ebrahimi currentFrameIndex,
11893*b7893ccfSSadaf Ebrahimi m_FrameInUseCount,
11894*b7893ccfSSadaf Ebrahimi m_BufferImageGranularity,
11895*b7893ccfSSadaf Ebrahimi size,
11896*b7893ccfSSadaf Ebrahimi alignment,
11897*b7893ccfSSadaf Ebrahimi isUpperAddress,
11898*b7893ccfSSadaf Ebrahimi suballocType,
11899*b7893ccfSSadaf Ebrahimi false, // canMakeOtherLost
11900*b7893ccfSSadaf Ebrahimi strategy,
11901*b7893ccfSSadaf Ebrahimi &currRequest))
11902*b7893ccfSSadaf Ebrahimi {
11903*b7893ccfSSadaf Ebrahimi // Allocate from pBlock.
11904*b7893ccfSSadaf Ebrahimi VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11905*b7893ccfSSadaf Ebrahimi
11906*b7893ccfSSadaf Ebrahimi if(mapped)
11907*b7893ccfSSadaf Ebrahimi {
11908*b7893ccfSSadaf Ebrahimi VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11909*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
11910*b7893ccfSSadaf Ebrahimi {
11911*b7893ccfSSadaf Ebrahimi return res;
11912*b7893ccfSSadaf Ebrahimi }
11913*b7893ccfSSadaf Ebrahimi }
11914*b7893ccfSSadaf Ebrahimi
11915*b7893ccfSSadaf Ebrahimi // We no longer have an empty block.
11916*b7893ccfSSadaf Ebrahimi if(pBlock->m_pMetadata->IsEmpty())
11917*b7893ccfSSadaf Ebrahimi {
11918*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = false;
11919*b7893ccfSSadaf Ebrahimi }
11920*b7893ccfSSadaf Ebrahimi
11921*b7893ccfSSadaf Ebrahimi *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11922*b7893ccfSSadaf Ebrahimi pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11923*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitBlockAllocation(
11924*b7893ccfSSadaf Ebrahimi hCurrentPool,
11925*b7893ccfSSadaf Ebrahimi pBlock,
11926*b7893ccfSSadaf Ebrahimi currRequest.offset,
11927*b7893ccfSSadaf Ebrahimi alignment,
11928*b7893ccfSSadaf Ebrahimi size,
11929*b7893ccfSSadaf Ebrahimi suballocType,
11930*b7893ccfSSadaf Ebrahimi mapped,
11931*b7893ccfSSadaf Ebrahimi (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11932*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pBlock->Validate());
11933*b7893ccfSSadaf Ebrahimi (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11934*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11935*b7893ccfSSadaf Ebrahimi {
11936*b7893ccfSSadaf Ebrahimi m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11937*b7893ccfSSadaf Ebrahimi }
11938*b7893ccfSSadaf Ebrahimi if(IsCorruptionDetectionEnabled())
11939*b7893ccfSSadaf Ebrahimi {
11940*b7893ccfSSadaf Ebrahimi VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11941*b7893ccfSSadaf Ebrahimi VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11942*b7893ccfSSadaf Ebrahimi }
11943*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11944*b7893ccfSSadaf Ebrahimi }
11945*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11946*b7893ccfSSadaf Ebrahimi }
11947*b7893ccfSSadaf Ebrahimi
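// Allocates a new VkDeviceMemory of the given size through the allocator (so its
// device-memory bookkeeping applies), wraps it in a VmaDeviceMemoryBlock with a
// fresh id and this vector's algorithm, and appends it to m_Blocks.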
11948*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11949*b7893ccfSSadaf Ebrahimi {
11950*b7893ccfSSadaf Ebrahimi VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11951*b7893ccfSSadaf Ebrahimi allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11952*b7893ccfSSadaf Ebrahimi allocInfo.allocationSize = blockSize;
11953*b7893ccfSSadaf Ebrahimi VkDeviceMemory mem = VK_NULL_HANDLE;
11954*b7893ccfSSadaf Ebrahimi VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11955*b7893ccfSSadaf Ebrahimi if(res < 0)
11956*b7893ccfSSadaf Ebrahimi {
11957*b7893ccfSSadaf Ebrahimi return res;
11958*b7893ccfSSadaf Ebrahimi }
11959*b7893ccfSSadaf Ebrahimi
11960*b7893ccfSSadaf Ebrahimi // New VkDeviceMemory successfully created.
11961*b7893ccfSSadaf Ebrahimi
11962*b7893ccfSSadaf Ebrahimi // Create a new VmaDeviceMemoryBlock object for it.
11963*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11964*b7893ccfSSadaf Ebrahimi pBlock->Init(
11965*b7893ccfSSadaf Ebrahimi m_hAllocator,
11966*b7893ccfSSadaf Ebrahimi m_MemoryTypeIndex,
11967*b7893ccfSSadaf Ebrahimi mem,
11968*b7893ccfSSadaf Ebrahimi allocInfo.allocationSize,
11969*b7893ccfSSadaf Ebrahimi m_NextBlockId++,
11970*b7893ccfSSadaf Ebrahimi m_Algorithm);
11971*b7893ccfSSadaf Ebrahimi
11972*b7893ccfSSadaf Ebrahimi m_Blocks.push_back(pBlock);
11973*b7893ccfSSadaf Ebrahimi if(pNewBlockIndex != VMA_NULL)
11974*b7893ccfSSadaf Ebrahimi {
11975*b7893ccfSSadaf Ebrahimi *pNewBlockIndex = m_Blocks.size() - 1;
11976*b7893ccfSSadaf Ebrahimi }
11977*b7893ccfSSadaf Ebrahimi
11978*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
11979*b7893ccfSSadaf Ebrahimi }
11980*b7893ccfSSadaf Ebrahimi
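// CPU defragmentation path: every block touched by a move is mapped (blocks mapped
// only for this purpose are unmapped again at the end), the data is copied with
// memmove on the host, and for non-coherent memory types the source range is
// invalidated before and the destination range flushed after the copy.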
11981*b7893ccfSSadaf Ebrahimi void VmaBlockVector::ApplyDefragmentationMovesCpu(
11982*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pDefragCtx,
11983*b7893ccfSSadaf Ebrahimi const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11984*b7893ccfSSadaf Ebrahimi {
11985*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_Blocks.size();
11986*b7893ccfSSadaf Ebrahimi const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11987*b7893ccfSSadaf Ebrahimi
11988*b7893ccfSSadaf Ebrahimi enum BLOCK_FLAG
11989*b7893ccfSSadaf Ebrahimi {
11990*b7893ccfSSadaf Ebrahimi BLOCK_FLAG_USED = 0x00000001,
11991*b7893ccfSSadaf Ebrahimi BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11992*b7893ccfSSadaf Ebrahimi };
11993*b7893ccfSSadaf Ebrahimi
11994*b7893ccfSSadaf Ebrahimi struct BlockInfo
11995*b7893ccfSSadaf Ebrahimi {
11996*b7893ccfSSadaf Ebrahimi uint32_t flags;
11997*b7893ccfSSadaf Ebrahimi void* pMappedData;
11998*b7893ccfSSadaf Ebrahimi };
11999*b7893ccfSSadaf Ebrahimi VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12000*b7893ccfSSadaf Ebrahimi blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12001*b7893ccfSSadaf Ebrahimi memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12002*b7893ccfSSadaf Ebrahimi
12003*b7893ccfSSadaf Ebrahimi // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12004*b7893ccfSSadaf Ebrahimi const size_t moveCount = moves.size();
12005*b7893ccfSSadaf Ebrahimi for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12006*b7893ccfSSadaf Ebrahimi {
12007*b7893ccfSSadaf Ebrahimi const VmaDefragmentationMove& move = moves[moveIndex];
12008*b7893ccfSSadaf Ebrahimi blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12009*b7893ccfSSadaf Ebrahimi blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12010*b7893ccfSSadaf Ebrahimi }
12011*b7893ccfSSadaf Ebrahimi
12012*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12013*b7893ccfSSadaf Ebrahimi
12014*b7893ccfSSadaf Ebrahimi // Go over all blocks. Get mapped pointer or map if necessary.
12015*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12016*b7893ccfSSadaf Ebrahimi {
12017*b7893ccfSSadaf Ebrahimi BlockInfo& currBlockInfo = blockInfo[blockIndex];
12018*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12019*b7893ccfSSadaf Ebrahimi if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12020*b7893ccfSSadaf Ebrahimi {
12021*b7893ccfSSadaf Ebrahimi currBlockInfo.pMappedData = pBlock->GetMappedData();
12022*b7893ccfSSadaf Ebrahimi // It is not originally mapped - map it.
12023*b7893ccfSSadaf Ebrahimi if(currBlockInfo.pMappedData == VMA_NULL)
12024*b7893ccfSSadaf Ebrahimi {
12025*b7893ccfSSadaf Ebrahimi pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12026*b7893ccfSSadaf Ebrahimi if(pDefragCtx->res == VK_SUCCESS)
12027*b7893ccfSSadaf Ebrahimi {
12028*b7893ccfSSadaf Ebrahimi currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12029*b7893ccfSSadaf Ebrahimi }
12030*b7893ccfSSadaf Ebrahimi }
12031*b7893ccfSSadaf Ebrahimi }
12032*b7893ccfSSadaf Ebrahimi }
12033*b7893ccfSSadaf Ebrahimi
12034*b7893ccfSSadaf Ebrahimi // Go over all moves. Do actual data transfer.
12035*b7893ccfSSadaf Ebrahimi if(pDefragCtx->res == VK_SUCCESS)
12036*b7893ccfSSadaf Ebrahimi {
12037*b7893ccfSSadaf Ebrahimi const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12038*b7893ccfSSadaf Ebrahimi VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12039*b7893ccfSSadaf Ebrahimi
12040*b7893ccfSSadaf Ebrahimi for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12041*b7893ccfSSadaf Ebrahimi {
12042*b7893ccfSSadaf Ebrahimi const VmaDefragmentationMove& move = moves[moveIndex];
12043*b7893ccfSSadaf Ebrahimi
12044*b7893ccfSSadaf Ebrahimi const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12045*b7893ccfSSadaf Ebrahimi const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12046*b7893ccfSSadaf Ebrahimi
12047*b7893ccfSSadaf Ebrahimi VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12048*b7893ccfSSadaf Ebrahimi
12049*b7893ccfSSadaf Ebrahimi // Invalidate source.
12050*b7893ccfSSadaf Ebrahimi if(isNonCoherent)
12051*b7893ccfSSadaf Ebrahimi {
12052*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12053*b7893ccfSSadaf Ebrahimi memRange.memory = pSrcBlock->GetDeviceMemory();
12054*b7893ccfSSadaf Ebrahimi memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12055*b7893ccfSSadaf Ebrahimi memRange.size = VMA_MIN(
12056*b7893ccfSSadaf Ebrahimi VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12057*b7893ccfSSadaf Ebrahimi pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12058*b7893ccfSSadaf Ebrahimi (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12059*b7893ccfSSadaf Ebrahimi }
12060*b7893ccfSSadaf Ebrahimi
12061*b7893ccfSSadaf Ebrahimi // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12062*b7893ccfSSadaf Ebrahimi memmove(
12063*b7893ccfSSadaf Ebrahimi reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12064*b7893ccfSSadaf Ebrahimi reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12065*b7893ccfSSadaf Ebrahimi static_cast<size_t>(move.size));
12066*b7893ccfSSadaf Ebrahimi
12067*b7893ccfSSadaf Ebrahimi if(IsCorruptionDetectionEnabled())
12068*b7893ccfSSadaf Ebrahimi {
12069*b7893ccfSSadaf Ebrahimi VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12070*b7893ccfSSadaf Ebrahimi VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12071*b7893ccfSSadaf Ebrahimi }
12072*b7893ccfSSadaf Ebrahimi
12073*b7893ccfSSadaf Ebrahimi // Flush destination.
12074*b7893ccfSSadaf Ebrahimi if(isNonCoherent)
12075*b7893ccfSSadaf Ebrahimi {
12076*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12077*b7893ccfSSadaf Ebrahimi memRange.memory = pDstBlock->GetDeviceMemory();
12078*b7893ccfSSadaf Ebrahimi memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12079*b7893ccfSSadaf Ebrahimi memRange.size = VMA_MIN(
12080*b7893ccfSSadaf Ebrahimi VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12081*b7893ccfSSadaf Ebrahimi pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12082*b7893ccfSSadaf Ebrahimi (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12083*b7893ccfSSadaf Ebrahimi }
12084*b7893ccfSSadaf Ebrahimi }
12085*b7893ccfSSadaf Ebrahimi }
12086*b7893ccfSSadaf Ebrahimi
12087*b7893ccfSSadaf Ebrahimi // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12088*b7893ccfSSadaf Ebrahimi // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
12089*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = blockCount; blockIndex--; )
12090*b7893ccfSSadaf Ebrahimi {
12091*b7893ccfSSadaf Ebrahimi const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12092*b7893ccfSSadaf Ebrahimi if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12093*b7893ccfSSadaf Ebrahimi {
12094*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12095*b7893ccfSSadaf Ebrahimi pBlock->Unmap(m_hAllocator, 1);
12096*b7893ccfSSadaf Ebrahimi }
12097*b7893ccfSSadaf Ebrahimi }
12098*b7893ccfSSadaf Ebrahimi }
12099*b7893ccfSSadaf Ebrahimi
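// GPU defragmentation path: a transfer-src/dst VkBuffer spanning the whole block is
// created and bound for every block involved in a move, vkCmdCopyBuffer commands are
// recorded into the provided command buffer, and when any copies were recorded the
// context result is set to VK_NOT_READY to signal pending GPU work; the temporary
// buffers remain in pDefragCtx->blockContexts for later destruction.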
12100*b7893ccfSSadaf Ebrahimi void VmaBlockVector::ApplyDefragmentationMovesGpu(
12101*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pDefragCtx,
12102*b7893ccfSSadaf Ebrahimi const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12103*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer)
12104*b7893ccfSSadaf Ebrahimi {
12105*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_Blocks.size();
12106*b7893ccfSSadaf Ebrahimi
12107*b7893ccfSSadaf Ebrahimi pDefragCtx->blockContexts.resize(blockCount);
12108*b7893ccfSSadaf Ebrahimi memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12109*b7893ccfSSadaf Ebrahimi
12110*b7893ccfSSadaf Ebrahimi // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12111*b7893ccfSSadaf Ebrahimi const size_t moveCount = moves.size();
12112*b7893ccfSSadaf Ebrahimi for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12113*b7893ccfSSadaf Ebrahimi {
12114*b7893ccfSSadaf Ebrahimi const VmaDefragmentationMove& move = moves[moveIndex];
12115*b7893ccfSSadaf Ebrahimi pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12116*b7893ccfSSadaf Ebrahimi pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12117*b7893ccfSSadaf Ebrahimi }
12118*b7893ccfSSadaf Ebrahimi
12119*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12120*b7893ccfSSadaf Ebrahimi
12121*b7893ccfSSadaf Ebrahimi // Go over all blocks. Create and bind buffer for whole block if necessary.
12122*b7893ccfSSadaf Ebrahimi {
12123*b7893ccfSSadaf Ebrahimi VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12124*b7893ccfSSadaf Ebrahimi bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12125*b7893ccfSSadaf Ebrahimi VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12126*b7893ccfSSadaf Ebrahimi
12127*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12128*b7893ccfSSadaf Ebrahimi {
12129*b7893ccfSSadaf Ebrahimi VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12130*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12131*b7893ccfSSadaf Ebrahimi if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12132*b7893ccfSSadaf Ebrahimi {
12133*b7893ccfSSadaf Ebrahimi bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12134*b7893ccfSSadaf Ebrahimi pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12135*b7893ccfSSadaf Ebrahimi m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12136*b7893ccfSSadaf Ebrahimi if(pDefragCtx->res == VK_SUCCESS)
12137*b7893ccfSSadaf Ebrahimi {
12138*b7893ccfSSadaf Ebrahimi pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12139*b7893ccfSSadaf Ebrahimi m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12140*b7893ccfSSadaf Ebrahimi }
12141*b7893ccfSSadaf Ebrahimi }
12142*b7893ccfSSadaf Ebrahimi }
12143*b7893ccfSSadaf Ebrahimi }
12144*b7893ccfSSadaf Ebrahimi
12145*b7893ccfSSadaf Ebrahimi // Go over all moves. Post data transfer commands to command buffer.
12146*b7893ccfSSadaf Ebrahimi if(pDefragCtx->res == VK_SUCCESS)
12147*b7893ccfSSadaf Ebrahimi {
12148*b7893ccfSSadaf Ebrahimi const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12149*b7893ccfSSadaf Ebrahimi VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12150*b7893ccfSSadaf Ebrahimi
12151*b7893ccfSSadaf Ebrahimi for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12152*b7893ccfSSadaf Ebrahimi {
12153*b7893ccfSSadaf Ebrahimi const VmaDefragmentationMove& move = moves[moveIndex];
12154*b7893ccfSSadaf Ebrahimi
12155*b7893ccfSSadaf Ebrahimi const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12156*b7893ccfSSadaf Ebrahimi const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12157*b7893ccfSSadaf Ebrahimi
12158*b7893ccfSSadaf Ebrahimi VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12159*b7893ccfSSadaf Ebrahimi
12160*b7893ccfSSadaf Ebrahimi VkBufferCopy region = {
12161*b7893ccfSSadaf Ebrahimi move.srcOffset,
12162*b7893ccfSSadaf Ebrahimi move.dstOffset,
12163*b7893ccfSSadaf Ebrahimi move.size };
12164*b7893ccfSSadaf Ebrahimi (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12165*b7893ccfSSadaf Ebrahimi commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12166*b7893ccfSSadaf Ebrahimi }
12167*b7893ccfSSadaf Ebrahimi }
12168*b7893ccfSSadaf Ebrahimi
12169*b7893ccfSSadaf Ebrahimi // Save buffers to defrag context for later destruction.
12170*b7893ccfSSadaf Ebrahimi if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12171*b7893ccfSSadaf Ebrahimi {
12172*b7893ccfSSadaf Ebrahimi pDefragCtx->res = VK_NOT_READY;
12173*b7893ccfSSadaf Ebrahimi }
12174*b7893ccfSSadaf Ebrahimi }
12175*b7893ccfSSadaf Ebrahimi
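// Called after defragmentation: destroys every empty block beyond m_MinBlockCount,
// crediting the freed bytes and block count to pDefragmentationStats if provided,
// and records whether an empty block remains in m_HasEmptyBlock.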
12176*b7893ccfSSadaf Ebrahimi void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12177*b7893ccfSSadaf Ebrahimi {
12178*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = false;
12179*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12180*b7893ccfSSadaf Ebrahimi {
12181*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12182*b7893ccfSSadaf Ebrahimi if(pBlock->m_pMetadata->IsEmpty())
12183*b7893ccfSSadaf Ebrahimi {
12184*b7893ccfSSadaf Ebrahimi if(m_Blocks.size() > m_MinBlockCount)
12185*b7893ccfSSadaf Ebrahimi {
12186*b7893ccfSSadaf Ebrahimi if(pDefragmentationStats != VMA_NULL)
12187*b7893ccfSSadaf Ebrahimi {
12188*b7893ccfSSadaf Ebrahimi ++pDefragmentationStats->deviceMemoryBlocksFreed;
12189*b7893ccfSSadaf Ebrahimi pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12190*b7893ccfSSadaf Ebrahimi }
12191*b7893ccfSSadaf Ebrahimi
12192*b7893ccfSSadaf Ebrahimi VmaVectorRemove(m_Blocks, blockIndex);
12193*b7893ccfSSadaf Ebrahimi pBlock->Destroy(m_hAllocator);
12194*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, pBlock);
12195*b7893ccfSSadaf Ebrahimi }
12196*b7893ccfSSadaf Ebrahimi else
12197*b7893ccfSSadaf Ebrahimi {
12198*b7893ccfSSadaf Ebrahimi m_HasEmptyBlock = true;
12199*b7893ccfSSadaf Ebrahimi }
12200*b7893ccfSSadaf Ebrahimi }
12201*b7893ccfSSadaf Ebrahimi }
12202*b7893ccfSSadaf Ebrahimi }
12203*b7893ccfSSadaf Ebrahimi
12204*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
12205*b7893ccfSSadaf Ebrahimi
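// Serializes this block vector into the JSON statistics string. Rough shape for a custom pool,
// as derived from the writes below (a sketch, not a format guarantee):
//
//     {
//         "MemoryTypeIndex": ..., "BlockSize": ...,
//         "BlockCount": { "Min": ..., "Max": ..., "Cur": ... },
//         "FrameInUseCount": ..., "Algorithm": "...",
//         "Blocks": { "<block id>": { ...per-block metadata... } }
//     }
//
// Default (non-custom-pool) block vectors write "PreferredBlockSize" instead of the
// pool-specific fields, followed by the same "Blocks" object.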
12206*b7893ccfSSadaf Ebrahimi void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12207*b7893ccfSSadaf Ebrahimi {
12208*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12209*b7893ccfSSadaf Ebrahimi
12210*b7893ccfSSadaf Ebrahimi json.BeginObject();
12211*b7893ccfSSadaf Ebrahimi
12212*b7893ccfSSadaf Ebrahimi if(m_IsCustomPool)
12213*b7893ccfSSadaf Ebrahimi {
12214*b7893ccfSSadaf Ebrahimi json.WriteString("MemoryTypeIndex");
12215*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_MemoryTypeIndex);
12216*b7893ccfSSadaf Ebrahimi
12217*b7893ccfSSadaf Ebrahimi json.WriteString("BlockSize");
12218*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_PreferredBlockSize);
12219*b7893ccfSSadaf Ebrahimi
12220*b7893ccfSSadaf Ebrahimi json.WriteString("BlockCount");
12221*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
12222*b7893ccfSSadaf Ebrahimi if(m_MinBlockCount > 0)
12223*b7893ccfSSadaf Ebrahimi {
12224*b7893ccfSSadaf Ebrahimi json.WriteString("Min");
12225*b7893ccfSSadaf Ebrahimi json.WriteNumber((uint64_t)m_MinBlockCount);
12226*b7893ccfSSadaf Ebrahimi }
12227*b7893ccfSSadaf Ebrahimi if(m_MaxBlockCount < SIZE_MAX)
12228*b7893ccfSSadaf Ebrahimi {
12229*b7893ccfSSadaf Ebrahimi json.WriteString("Max");
12230*b7893ccfSSadaf Ebrahimi json.WriteNumber((uint64_t)m_MaxBlockCount);
12231*b7893ccfSSadaf Ebrahimi }
12232*b7893ccfSSadaf Ebrahimi json.WriteString("Cur");
12233*b7893ccfSSadaf Ebrahimi json.WriteNumber((uint64_t)m_Blocks.size());
12234*b7893ccfSSadaf Ebrahimi json.EndObject();
12235*b7893ccfSSadaf Ebrahimi
12236*b7893ccfSSadaf Ebrahimi if(m_FrameInUseCount > 0)
12237*b7893ccfSSadaf Ebrahimi {
12238*b7893ccfSSadaf Ebrahimi json.WriteString("FrameInUseCount");
12239*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_FrameInUseCount);
12240*b7893ccfSSadaf Ebrahimi }
12241*b7893ccfSSadaf Ebrahimi
12242*b7893ccfSSadaf Ebrahimi if(m_Algorithm != 0)
12243*b7893ccfSSadaf Ebrahimi {
12244*b7893ccfSSadaf Ebrahimi json.WriteString("Algorithm");
12245*b7893ccfSSadaf Ebrahimi json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12246*b7893ccfSSadaf Ebrahimi }
12247*b7893ccfSSadaf Ebrahimi }
12248*b7893ccfSSadaf Ebrahimi else
12249*b7893ccfSSadaf Ebrahimi {
12250*b7893ccfSSadaf Ebrahimi json.WriteString("PreferredBlockSize");
12251*b7893ccfSSadaf Ebrahimi json.WriteNumber(m_PreferredBlockSize);
12252*b7893ccfSSadaf Ebrahimi }
12253*b7893ccfSSadaf Ebrahimi
12254*b7893ccfSSadaf Ebrahimi json.WriteString("Blocks");
12255*b7893ccfSSadaf Ebrahimi json.BeginObject();
12256*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_Blocks.size(); ++i)
12257*b7893ccfSSadaf Ebrahimi {
12258*b7893ccfSSadaf Ebrahimi json.BeginString();
12259*b7893ccfSSadaf Ebrahimi json.ContinueString(m_Blocks[i]->GetId());
12260*b7893ccfSSadaf Ebrahimi json.EndString();
12261*b7893ccfSSadaf Ebrahimi
12262*b7893ccfSSadaf Ebrahimi m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12263*b7893ccfSSadaf Ebrahimi }
12264*b7893ccfSSadaf Ebrahimi json.EndObject();
12265*b7893ccfSSadaf Ebrahimi
12266*b7893ccfSSadaf Ebrahimi json.EndObject();
12267*b7893ccfSSadaf Ebrahimi }
12268*b7893ccfSSadaf Ebrahimi
12269*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
12270*b7893ccfSSadaf Ebrahimi
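// Defragments a single block vector. The CPU path (ApplyDefragmentationMovesCpu) copies data
// through mapped pointers and therefore requires HOST_VISIBLE memory; the GPU path
// (ApplyDefragmentationMovesGpu) records vkCmdCopyBuffer into the caller's command buffer and
// is preferred for DEVICE_LOCAL memory or on integrated GPUs. The GPU path is additionally
// ruled out when corruption detection is active on host-visible, host-coherent memory.
// The max*BytesToMove / max*AllocationsToMove references are in/out: they are decremented by
// what this block vector actually consumed.
//
// Rough usage sketch of the public API that ultimately reaches this function (see the
// interface section earlier in this file for the exact declarations; the field names below
// follow VmaDefragmentationInfo2):
//
//     VmaDefragmentationInfo2 info = {};
//     info.allocationCount = allocCount;
//     info.pAllocations = allocations;
//     info.maxGpuBytesToMove = VK_WHOLE_SIZE;
//     info.maxGpuAllocationsToMove = UINT32_MAX;
//     info.commandBuffer = cmdBuf; // required for the GPU path
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &info, &stats, &defragCtx);
//     // ... submit cmdBuf and wait for it to finish ...
//     vmaDefragmentationEnd(allocator, defragCtx);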
12271*b7893ccfSSadaf Ebrahimi void VmaBlockVector::Defragment(
12272*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pCtx,
12273*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
12274*b7893ccfSSadaf Ebrahimi VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12275*b7893ccfSSadaf Ebrahimi VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12276*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer)
12277*b7893ccfSSadaf Ebrahimi {
12278*b7893ccfSSadaf Ebrahimi pCtx->res = VK_SUCCESS;
12279*b7893ccfSSadaf Ebrahimi
12280*b7893ccfSSadaf Ebrahimi const VkMemoryPropertyFlags memPropFlags =
12281*b7893ccfSSadaf Ebrahimi m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12282*b7893ccfSSadaf Ebrahimi const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12283*b7893ccfSSadaf Ebrahimi const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12284*b7893ccfSSadaf Ebrahimi
12285*b7893ccfSSadaf Ebrahimi const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12286*b7893ccfSSadaf Ebrahimi isHostVisible;
12287*b7893ccfSSadaf Ebrahimi const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12288*b7893ccfSSadaf Ebrahimi (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12289*b7893ccfSSadaf Ebrahimi
12290*b7893ccfSSadaf Ebrahimi // There are options to defragment this memory type.
12291*b7893ccfSSadaf Ebrahimi if(canDefragmentOnCpu || canDefragmentOnGpu)
12292*b7893ccfSSadaf Ebrahimi {
12293*b7893ccfSSadaf Ebrahimi bool defragmentOnGpu;
12294*b7893ccfSSadaf Ebrahimi // There is only one option to defragment this memory type.
12295*b7893ccfSSadaf Ebrahimi if(canDefragmentOnGpu != canDefragmentOnCpu)
12296*b7893ccfSSadaf Ebrahimi {
12297*b7893ccfSSadaf Ebrahimi defragmentOnGpu = canDefragmentOnGpu;
12298*b7893ccfSSadaf Ebrahimi }
12299*b7893ccfSSadaf Ebrahimi // Both options are available: Heuristics to choose the best one.
12300*b7893ccfSSadaf Ebrahimi else
12301*b7893ccfSSadaf Ebrahimi {
12302*b7893ccfSSadaf Ebrahimi defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12303*b7893ccfSSadaf Ebrahimi m_hAllocator->IsIntegratedGpu();
12304*b7893ccfSSadaf Ebrahimi }
12305*b7893ccfSSadaf Ebrahimi
12306*b7893ccfSSadaf Ebrahimi bool overlappingMoveSupported = !defragmentOnGpu;
12307*b7893ccfSSadaf Ebrahimi
12308*b7893ccfSSadaf Ebrahimi if(m_hAllocator->m_UseMutex)
12309*b7893ccfSSadaf Ebrahimi {
12310*b7893ccfSSadaf Ebrahimi m_Mutex.LockWrite();
12311*b7893ccfSSadaf Ebrahimi pCtx->mutexLocked = true;
12312*b7893ccfSSadaf Ebrahimi }
12313*b7893ccfSSadaf Ebrahimi
12314*b7893ccfSSadaf Ebrahimi pCtx->Begin(overlappingMoveSupported);
12315*b7893ccfSSadaf Ebrahimi
12316*b7893ccfSSadaf Ebrahimi // Defragment.
12317*b7893ccfSSadaf Ebrahimi
12318*b7893ccfSSadaf Ebrahimi const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12319*b7893ccfSSadaf Ebrahimi const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12320*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12321*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12322*b7893ccfSSadaf Ebrahimi pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12323*b7893ccfSSadaf Ebrahimi
12324*b7893ccfSSadaf Ebrahimi // Accumulate statistics.
12325*b7893ccfSSadaf Ebrahimi if(pStats != VMA_NULL)
12326*b7893ccfSSadaf Ebrahimi {
12327*b7893ccfSSadaf Ebrahimi const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12328*b7893ccfSSadaf Ebrahimi const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12329*b7893ccfSSadaf Ebrahimi pStats->bytesMoved += bytesMoved;
12330*b7893ccfSSadaf Ebrahimi pStats->allocationsMoved += allocationsMoved;
12331*b7893ccfSSadaf Ebrahimi VMA_ASSERT(bytesMoved <= maxBytesToMove);
12332*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12333*b7893ccfSSadaf Ebrahimi if(defragmentOnGpu)
12334*b7893ccfSSadaf Ebrahimi {
12335*b7893ccfSSadaf Ebrahimi maxGpuBytesToMove -= bytesMoved;
12336*b7893ccfSSadaf Ebrahimi maxGpuAllocationsToMove -= allocationsMoved;
12337*b7893ccfSSadaf Ebrahimi }
12338*b7893ccfSSadaf Ebrahimi else
12339*b7893ccfSSadaf Ebrahimi {
12340*b7893ccfSSadaf Ebrahimi maxCpuBytesToMove -= bytesMoved;
12341*b7893ccfSSadaf Ebrahimi maxCpuAllocationsToMove -= allocationsMoved;
12342*b7893ccfSSadaf Ebrahimi }
12343*b7893ccfSSadaf Ebrahimi }
12344*b7893ccfSSadaf Ebrahimi
12345*b7893ccfSSadaf Ebrahimi if(pCtx->res >= VK_SUCCESS)
12346*b7893ccfSSadaf Ebrahimi {
12347*b7893ccfSSadaf Ebrahimi if(defragmentOnGpu)
12348*b7893ccfSSadaf Ebrahimi {
12349*b7893ccfSSadaf Ebrahimi ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12350*b7893ccfSSadaf Ebrahimi }
12351*b7893ccfSSadaf Ebrahimi else
12352*b7893ccfSSadaf Ebrahimi {
12353*b7893ccfSSadaf Ebrahimi ApplyDefragmentationMovesCpu(pCtx, moves);
12354*b7893ccfSSadaf Ebrahimi }
12355*b7893ccfSSadaf Ebrahimi }
12356*b7893ccfSSadaf Ebrahimi }
12357*b7893ccfSSadaf Ebrahimi }
12358*b7893ccfSSadaf Ebrahimi
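// Second half of the defragmentation of this block vector: destroys the temporary VkBuffers
// created for the GPU copy path, frees blocks that the moves emptied, and releases the write
// lock taken in Defragment() if it is still held.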
12359*b7893ccfSSadaf Ebrahimi void VmaBlockVector::DefragmentationEnd(
12360*b7893ccfSSadaf Ebrahimi class VmaBlockVectorDefragmentationContext* pCtx,
12361*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats)
12362*b7893ccfSSadaf Ebrahimi {
12363*b7893ccfSSadaf Ebrahimi // Destroy buffers.
12364*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12365*b7893ccfSSadaf Ebrahimi {
12366*b7893ccfSSadaf Ebrahimi VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12367*b7893ccfSSadaf Ebrahimi if(blockCtx.hBuffer)
12368*b7893ccfSSadaf Ebrahimi {
12369*b7893ccfSSadaf Ebrahimi (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12370*b7893ccfSSadaf Ebrahimi m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12371*b7893ccfSSadaf Ebrahimi }
12372*b7893ccfSSadaf Ebrahimi }
12373*b7893ccfSSadaf Ebrahimi
12374*b7893ccfSSadaf Ebrahimi if(pCtx->res >= VK_SUCCESS)
12375*b7893ccfSSadaf Ebrahimi {
12376*b7893ccfSSadaf Ebrahimi FreeEmptyBlocks(pStats);
12377*b7893ccfSSadaf Ebrahimi }
12378*b7893ccfSSadaf Ebrahimi
12379*b7893ccfSSadaf Ebrahimi if(pCtx->mutexLocked)
12380*b7893ccfSSadaf Ebrahimi {
12381*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_hAllocator->m_UseMutex);
12382*b7893ccfSSadaf Ebrahimi m_Mutex.UnlockWrite();
12383*b7893ccfSSadaf Ebrahimi }
12384*b7893ccfSSadaf Ebrahimi }
12385*b7893ccfSSadaf Ebrahimi
12386*b7893ccfSSadaf Ebrahimi size_t VmaBlockVector::CalcAllocationCount() const
12387*b7893ccfSSadaf Ebrahimi {
12388*b7893ccfSSadaf Ebrahimi size_t result = 0;
12389*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_Blocks.size(); ++i)
12390*b7893ccfSSadaf Ebrahimi {
12391*b7893ccfSSadaf Ebrahimi result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12392*b7893ccfSSadaf Ebrahimi }
12393*b7893ccfSSadaf Ebrahimi return result;
12394*b7893ccfSSadaf Ebrahimi }
12395*b7893ccfSSadaf Ebrahimi
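// Conservative check used when choosing the defragmentation algorithm: with
// bufferImageGranularity > 1, packing linear and optimal (buffer and image) resources tightly
// next to each other could violate Vulkan's granularity rules, so any block where such
// neighboring is possible disqualifies the fast path.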
12396*b7893ccfSSadaf Ebrahimi bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12397*b7893ccfSSadaf Ebrahimi {
12398*b7893ccfSSadaf Ebrahimi if(m_BufferImageGranularity == 1)
12399*b7893ccfSSadaf Ebrahimi {
12400*b7893ccfSSadaf Ebrahimi return false;
12401*b7893ccfSSadaf Ebrahimi }
12402*b7893ccfSSadaf Ebrahimi VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12403*b7893ccfSSadaf Ebrahimi for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12404*b7893ccfSSadaf Ebrahimi {
12405*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12406*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Algorithm == 0);
12407*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12408*b7893ccfSSadaf Ebrahimi if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12409*b7893ccfSSadaf Ebrahimi {
12410*b7893ccfSSadaf Ebrahimi return true;
12411*b7893ccfSSadaf Ebrahimi }
12412*b7893ccfSSadaf Ebrahimi }
12413*b7893ccfSSadaf Ebrahimi return false;
12414*b7893ccfSSadaf Ebrahimi }
12415*b7893ccfSSadaf Ebrahimi
12416*b7893ccfSSadaf Ebrahimi void VmaBlockVector::MakePoolAllocationsLost(
12417*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
12418*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount)
12419*b7893ccfSSadaf Ebrahimi {
12420*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12421*b7893ccfSSadaf Ebrahimi size_t lostAllocationCount = 0;
12422*b7893ccfSSadaf Ebrahimi for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12423*b7893ccfSSadaf Ebrahimi {
12424*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12425*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock);
12426*b7893ccfSSadaf Ebrahimi lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12427*b7893ccfSSadaf Ebrahimi }
12428*b7893ccfSSadaf Ebrahimi if(pLostAllocationCount != VMA_NULL)
12429*b7893ccfSSadaf Ebrahimi {
12430*b7893ccfSSadaf Ebrahimi *pLostAllocationCount = lostAllocationCount;
12431*b7893ccfSSadaf Ebrahimi }
12432*b7893ccfSSadaf Ebrahimi }
12433*b7893ccfSSadaf Ebrahimi
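// Validates the magic values written into the debug margins around each allocation.
// This is only meaningful when corruption detection is enabled for this memory type
// (which, as an assumption about IsCorruptionDetectionEnabled(), requires
// VMA_DEBUG_DETECT_CORRUPTION, a non-zero VMA_DEBUG_MARGIN, and host-visible, host-coherent
// memory); otherwise VK_ERROR_FEATURE_NOT_PRESENT is returned.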
12434*b7893ccfSSadaf Ebrahimi VkResult VmaBlockVector::CheckCorruption()
12435*b7893ccfSSadaf Ebrahimi {
12436*b7893ccfSSadaf Ebrahimi if(!IsCorruptionDetectionEnabled())
12437*b7893ccfSSadaf Ebrahimi {
12438*b7893ccfSSadaf Ebrahimi return VK_ERROR_FEATURE_NOT_PRESENT;
12439*b7893ccfSSadaf Ebrahimi }
12440*b7893ccfSSadaf Ebrahimi
12441*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12442*b7893ccfSSadaf Ebrahimi for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12443*b7893ccfSSadaf Ebrahimi {
12444*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12445*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock);
12446*b7893ccfSSadaf Ebrahimi VkResult res = pBlock->CheckCorruption(m_hAllocator);
12447*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
12448*b7893ccfSSadaf Ebrahimi {
12449*b7893ccfSSadaf Ebrahimi return res;
12450*b7893ccfSSadaf Ebrahimi }
12451*b7893ccfSSadaf Ebrahimi }
12452*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12453*b7893ccfSSadaf Ebrahimi }
12454*b7893ccfSSadaf Ebrahimi
12455*b7893ccfSSadaf Ebrahimi void VmaBlockVector::AddStats(VmaStats* pStats)
12456*b7893ccfSSadaf Ebrahimi {
12457*b7893ccfSSadaf Ebrahimi const uint32_t memTypeIndex = m_MemoryTypeIndex;
12458*b7893ccfSSadaf Ebrahimi const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12459*b7893ccfSSadaf Ebrahimi
12460*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12461*b7893ccfSSadaf Ebrahimi
12462*b7893ccfSSadaf Ebrahimi for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12463*b7893ccfSSadaf Ebrahimi {
12464*b7893ccfSSadaf Ebrahimi const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12465*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock);
12466*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pBlock->Validate());
12467*b7893ccfSSadaf Ebrahimi VmaStatInfo allocationStatInfo;
12468*b7893ccfSSadaf Ebrahimi pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12469*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->total, allocationStatInfo);
12470*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12471*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12472*b7893ccfSSadaf Ebrahimi }
12473*b7893ccfSSadaf Ebrahimi }
12474*b7893ccfSSadaf Ebrahimi
12475*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
12476*b7893ccfSSadaf Ebrahimi // VmaDefragmentationAlgorithm_Generic members definition
12477*b7893ccfSSadaf Ebrahimi
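// VmaDefragmentationAlgorithm_Generic: the slower, fully general strategy. It records a
// BlockInfo per block, gathers the movable allocations per block, then repeatedly tries to
// re-allocate each allocation into the "most destination" blocks (earlier in the sorted
// order), producing a list of VmaDefragmentationMove entries that the caller later applies
// on the CPU or GPU.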
12478*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12479*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
12480*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
12481*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
12482*b7893ccfSSadaf Ebrahimi bool overlappingMoveSupported) :
12483*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12484*b7893ccfSSadaf Ebrahimi m_AllocationCount(0),
12485*b7893ccfSSadaf Ebrahimi m_AllAllocations(false),
12486*b7893ccfSSadaf Ebrahimi m_BytesMoved(0),
12487*b7893ccfSSadaf Ebrahimi m_AllocationsMoved(0),
12488*b7893ccfSSadaf Ebrahimi m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12489*b7893ccfSSadaf Ebrahimi {
12490*b7893ccfSSadaf Ebrahimi // Create block info for each block.
12491*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_pBlockVector->m_Blocks.size();
12492*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12493*b7893ccfSSadaf Ebrahimi {
12494*b7893ccfSSadaf Ebrahimi BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12495*b7893ccfSSadaf Ebrahimi pBlockInfo->m_OriginalBlockIndex = blockIndex;
12496*b7893ccfSSadaf Ebrahimi pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12497*b7893ccfSSadaf Ebrahimi m_Blocks.push_back(pBlockInfo);
12498*b7893ccfSSadaf Ebrahimi }
12499*b7893ccfSSadaf Ebrahimi
12500*b7893ccfSSadaf Ebrahimi // Sort them by m_pBlock pointer value.
12501*b7893ccfSSadaf Ebrahimi VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12502*b7893ccfSSadaf Ebrahimi }
12503*b7893ccfSSadaf Ebrahimi
12504*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12505*b7893ccfSSadaf Ebrahimi {
12506*b7893ccfSSadaf Ebrahimi for(size_t i = m_Blocks.size(); i--; )
12507*b7893ccfSSadaf Ebrahimi {
12508*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, m_Blocks[i]);
12509*b7893ccfSSadaf Ebrahimi }
12510*b7893ccfSSadaf Ebrahimi }
12511*b7893ccfSSadaf Ebrahimi
12512*b7893ccfSSadaf Ebrahimi void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12513*b7893ccfSSadaf Ebrahimi {
12514*b7893ccfSSadaf Ebrahimi     // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
12515*b7893ccfSSadaf Ebrahimi if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12516*b7893ccfSSadaf Ebrahimi {
12517*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12518*b7893ccfSSadaf Ebrahimi BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12519*b7893ccfSSadaf Ebrahimi if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12520*b7893ccfSSadaf Ebrahimi {
12521*b7893ccfSSadaf Ebrahimi AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12522*b7893ccfSSadaf Ebrahimi (*it)->m_Allocations.push_back(allocInfo);
12523*b7893ccfSSadaf Ebrahimi }
12524*b7893ccfSSadaf Ebrahimi else
12525*b7893ccfSSadaf Ebrahimi {
12526*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
12527*b7893ccfSSadaf Ebrahimi }
12528*b7893ccfSSadaf Ebrahimi
12529*b7893ccfSSadaf Ebrahimi ++m_AllocationCount;
12530*b7893ccfSSadaf Ebrahimi }
12531*b7893ccfSSadaf Ebrahimi }
12532*b7893ccfSSadaf Ebrahimi
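// One round of the generic algorithm: walk source blocks from last ("most source") to first,
// and for each allocation try CreateAllocationRequest() on every block at or before it.
// A successful request that also passes MoveMakesSense() is committed immediately in the
// metadata (Alloc + FreeAtOffset + ChangeBlockAllocation) and recorded in 'moves'; hitting the
// byte or allocation limits ends the round early with VK_SUCCESS.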
12533*b7893ccfSSadaf Ebrahimi VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12534*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12535*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
12536*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove)
12537*b7893ccfSSadaf Ebrahimi {
12538*b7893ccfSSadaf Ebrahimi if(m_Blocks.empty())
12539*b7893ccfSSadaf Ebrahimi {
12540*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12541*b7893ccfSSadaf Ebrahimi }
12542*b7893ccfSSadaf Ebrahimi
12543*b7893ccfSSadaf Ebrahimi // This is a choice based on research.
12544*b7893ccfSSadaf Ebrahimi // Option 1:
12545*b7893ccfSSadaf Ebrahimi uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12546*b7893ccfSSadaf Ebrahimi // Option 2:
12547*b7893ccfSSadaf Ebrahimi //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12548*b7893ccfSSadaf Ebrahimi // Option 3:
12549*b7893ccfSSadaf Ebrahimi //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12550*b7893ccfSSadaf Ebrahimi
12551*b7893ccfSSadaf Ebrahimi size_t srcBlockMinIndex = 0;
12552*b7893ccfSSadaf Ebrahimi     // When using FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12553*b7893ccfSSadaf Ebrahimi /*
12554*b7893ccfSSadaf Ebrahimi if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12555*b7893ccfSSadaf Ebrahimi {
12556*b7893ccfSSadaf Ebrahimi const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12557*b7893ccfSSadaf Ebrahimi if(blocksWithNonMovableCount > 0)
12558*b7893ccfSSadaf Ebrahimi {
12559*b7893ccfSSadaf Ebrahimi srcBlockMinIndex = blocksWithNonMovableCount - 1;
12560*b7893ccfSSadaf Ebrahimi }
12561*b7893ccfSSadaf Ebrahimi }
12562*b7893ccfSSadaf Ebrahimi */
12563*b7893ccfSSadaf Ebrahimi
12564*b7893ccfSSadaf Ebrahimi size_t srcBlockIndex = m_Blocks.size() - 1;
12565*b7893ccfSSadaf Ebrahimi size_t srcAllocIndex = SIZE_MAX;
12566*b7893ccfSSadaf Ebrahimi for(;;)
12567*b7893ccfSSadaf Ebrahimi {
12568*b7893ccfSSadaf Ebrahimi // 1. Find next allocation to move.
12569*b7893ccfSSadaf Ebrahimi // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12570*b7893ccfSSadaf Ebrahimi // 1.2. Then start from last to first m_Allocations.
12571*b7893ccfSSadaf Ebrahimi while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12572*b7893ccfSSadaf Ebrahimi {
12573*b7893ccfSSadaf Ebrahimi if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12574*b7893ccfSSadaf Ebrahimi {
12575*b7893ccfSSadaf Ebrahimi // Finished: no more allocations to process.
12576*b7893ccfSSadaf Ebrahimi if(srcBlockIndex == srcBlockMinIndex)
12577*b7893ccfSSadaf Ebrahimi {
12578*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12579*b7893ccfSSadaf Ebrahimi }
12580*b7893ccfSSadaf Ebrahimi else
12581*b7893ccfSSadaf Ebrahimi {
12582*b7893ccfSSadaf Ebrahimi --srcBlockIndex;
12583*b7893ccfSSadaf Ebrahimi srcAllocIndex = SIZE_MAX;
12584*b7893ccfSSadaf Ebrahimi }
12585*b7893ccfSSadaf Ebrahimi }
12586*b7893ccfSSadaf Ebrahimi else
12587*b7893ccfSSadaf Ebrahimi {
12588*b7893ccfSSadaf Ebrahimi srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12589*b7893ccfSSadaf Ebrahimi }
12590*b7893ccfSSadaf Ebrahimi }
12591*b7893ccfSSadaf Ebrahimi
12592*b7893ccfSSadaf Ebrahimi BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12593*b7893ccfSSadaf Ebrahimi AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12594*b7893ccfSSadaf Ebrahimi
12595*b7893ccfSSadaf Ebrahimi const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12596*b7893ccfSSadaf Ebrahimi const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12597*b7893ccfSSadaf Ebrahimi const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12598*b7893ccfSSadaf Ebrahimi const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12599*b7893ccfSSadaf Ebrahimi
12600*b7893ccfSSadaf Ebrahimi // 2. Try to find new place for this allocation in preceding or current block.
12601*b7893ccfSSadaf Ebrahimi for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12602*b7893ccfSSadaf Ebrahimi {
12603*b7893ccfSSadaf Ebrahimi BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12604*b7893ccfSSadaf Ebrahimi VmaAllocationRequest dstAllocRequest;
12605*b7893ccfSSadaf Ebrahimi if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12606*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex,
12607*b7893ccfSSadaf Ebrahimi m_pBlockVector->GetFrameInUseCount(),
12608*b7893ccfSSadaf Ebrahimi m_pBlockVector->GetBufferImageGranularity(),
12609*b7893ccfSSadaf Ebrahimi size,
12610*b7893ccfSSadaf Ebrahimi alignment,
12611*b7893ccfSSadaf Ebrahimi false, // upperAddress
12612*b7893ccfSSadaf Ebrahimi suballocType,
12613*b7893ccfSSadaf Ebrahimi false, // canMakeOtherLost
12614*b7893ccfSSadaf Ebrahimi strategy,
12615*b7893ccfSSadaf Ebrahimi &dstAllocRequest) &&
12616*b7893ccfSSadaf Ebrahimi MoveMakesSense(
12617*b7893ccfSSadaf Ebrahimi dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12618*b7893ccfSSadaf Ebrahimi {
12619*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12620*b7893ccfSSadaf Ebrahimi
12621*b7893ccfSSadaf Ebrahimi // Reached limit on number of allocations or bytes to move.
12622*b7893ccfSSadaf Ebrahimi if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12623*b7893ccfSSadaf Ebrahimi (m_BytesMoved + size > maxBytesToMove))
12624*b7893ccfSSadaf Ebrahimi {
12625*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12626*b7893ccfSSadaf Ebrahimi }
12627*b7893ccfSSadaf Ebrahimi
12628*b7893ccfSSadaf Ebrahimi VmaDefragmentationMove move;
12629*b7893ccfSSadaf Ebrahimi move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12630*b7893ccfSSadaf Ebrahimi move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12631*b7893ccfSSadaf Ebrahimi move.srcOffset = srcOffset;
12632*b7893ccfSSadaf Ebrahimi move.dstOffset = dstAllocRequest.offset;
12633*b7893ccfSSadaf Ebrahimi move.size = size;
12634*b7893ccfSSadaf Ebrahimi moves.push_back(move);
12635*b7893ccfSSadaf Ebrahimi
12636*b7893ccfSSadaf Ebrahimi pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12637*b7893ccfSSadaf Ebrahimi dstAllocRequest,
12638*b7893ccfSSadaf Ebrahimi suballocType,
12639*b7893ccfSSadaf Ebrahimi size,
12640*b7893ccfSSadaf Ebrahimi false, // upperAddress
12641*b7893ccfSSadaf Ebrahimi allocInfo.m_hAllocation);
12642*b7893ccfSSadaf Ebrahimi pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12643*b7893ccfSSadaf Ebrahimi
12644*b7893ccfSSadaf Ebrahimi allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12645*b7893ccfSSadaf Ebrahimi
12646*b7893ccfSSadaf Ebrahimi if(allocInfo.m_pChanged != VMA_NULL)
12647*b7893ccfSSadaf Ebrahimi {
12648*b7893ccfSSadaf Ebrahimi *allocInfo.m_pChanged = VK_TRUE;
12649*b7893ccfSSadaf Ebrahimi }
12650*b7893ccfSSadaf Ebrahimi
12651*b7893ccfSSadaf Ebrahimi ++m_AllocationsMoved;
12652*b7893ccfSSadaf Ebrahimi m_BytesMoved += size;
12653*b7893ccfSSadaf Ebrahimi
12654*b7893ccfSSadaf Ebrahimi VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12655*b7893ccfSSadaf Ebrahimi
12656*b7893ccfSSadaf Ebrahimi break;
12657*b7893ccfSSadaf Ebrahimi }
12658*b7893ccfSSadaf Ebrahimi }
12659*b7893ccfSSadaf Ebrahimi
12660*b7893ccfSSadaf Ebrahimi // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12661*b7893ccfSSadaf Ebrahimi
12662*b7893ccfSSadaf Ebrahimi if(srcAllocIndex > 0)
12663*b7893ccfSSadaf Ebrahimi {
12664*b7893ccfSSadaf Ebrahimi --srcAllocIndex;
12665*b7893ccfSSadaf Ebrahimi }
12666*b7893ccfSSadaf Ebrahimi else
12667*b7893ccfSSadaf Ebrahimi {
12668*b7893ccfSSadaf Ebrahimi if(srcBlockIndex > 0)
12669*b7893ccfSSadaf Ebrahimi {
12670*b7893ccfSSadaf Ebrahimi --srcBlockIndex;
12671*b7893ccfSSadaf Ebrahimi srcAllocIndex = SIZE_MAX;
12672*b7893ccfSSadaf Ebrahimi }
12673*b7893ccfSSadaf Ebrahimi else
12674*b7893ccfSSadaf Ebrahimi {
12675*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12676*b7893ccfSSadaf Ebrahimi }
12677*b7893ccfSSadaf Ebrahimi }
12678*b7893ccfSSadaf Ebrahimi }
12679*b7893ccfSSadaf Ebrahimi }
12680*b7893ccfSSadaf Ebrahimi
12681*b7893ccfSSadaf Ebrahimi size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12682*b7893ccfSSadaf Ebrahimi {
12683*b7893ccfSSadaf Ebrahimi size_t result = 0;
12684*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < m_Blocks.size(); ++i)
12685*b7893ccfSSadaf Ebrahimi {
12686*b7893ccfSSadaf Ebrahimi if(m_Blocks[i]->m_HasNonMovableAllocations)
12687*b7893ccfSSadaf Ebrahimi {
12688*b7893ccfSSadaf Ebrahimi ++result;
12689*b7893ccfSSadaf Ebrahimi }
12690*b7893ccfSSadaf Ebrahimi }
12691*b7893ccfSSadaf Ebrahimi return result;
12692*b7893ccfSSadaf Ebrahimi }
12693*b7893ccfSSadaf Ebrahimi
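// Top-level entry of the generic algorithm: when m_AllAllocations is set, every non-free
// suballocation in every block is first registered as movable; blocks are then sorted from
// most "destination" to most "source" and DefragmentRound() is run roundCount times,
// stopping early if a round fails.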
12694*b7893ccfSSadaf Ebrahimi VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12695*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12696*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
12697*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove)
12698*b7893ccfSSadaf Ebrahimi {
12699*b7893ccfSSadaf Ebrahimi if(!m_AllAllocations && m_AllocationCount == 0)
12700*b7893ccfSSadaf Ebrahimi {
12701*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12702*b7893ccfSSadaf Ebrahimi }
12703*b7893ccfSSadaf Ebrahimi
12704*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_Blocks.size();
12705*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12706*b7893ccfSSadaf Ebrahimi {
12707*b7893ccfSSadaf Ebrahimi BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12708*b7893ccfSSadaf Ebrahimi
12709*b7893ccfSSadaf Ebrahimi if(m_AllAllocations)
12710*b7893ccfSSadaf Ebrahimi {
12711*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12712*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12713*b7893ccfSSadaf Ebrahimi it != pMetadata->m_Suballocations.end();
12714*b7893ccfSSadaf Ebrahimi ++it)
12715*b7893ccfSSadaf Ebrahimi {
12716*b7893ccfSSadaf Ebrahimi if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12717*b7893ccfSSadaf Ebrahimi {
12718*b7893ccfSSadaf Ebrahimi AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12719*b7893ccfSSadaf Ebrahimi pBlockInfo->m_Allocations.push_back(allocInfo);
12720*b7893ccfSSadaf Ebrahimi }
12721*b7893ccfSSadaf Ebrahimi }
12722*b7893ccfSSadaf Ebrahimi }
12723*b7893ccfSSadaf Ebrahimi
12724*b7893ccfSSadaf Ebrahimi pBlockInfo->CalcHasNonMovableAllocations();
12725*b7893ccfSSadaf Ebrahimi
12726*b7893ccfSSadaf Ebrahimi // This is a choice based on research.
12727*b7893ccfSSadaf Ebrahimi // Option 1:
12728*b7893ccfSSadaf Ebrahimi pBlockInfo->SortAllocationsByOffsetDescending();
12729*b7893ccfSSadaf Ebrahimi // Option 2:
12730*b7893ccfSSadaf Ebrahimi //pBlockInfo->SortAllocationsBySizeDescending();
12731*b7893ccfSSadaf Ebrahimi }
12732*b7893ccfSSadaf Ebrahimi
12733*b7893ccfSSadaf Ebrahimi     // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12734*b7893ccfSSadaf Ebrahimi VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12735*b7893ccfSSadaf Ebrahimi
12736*b7893ccfSSadaf Ebrahimi // This is a choice based on research.
12737*b7893ccfSSadaf Ebrahimi const uint32_t roundCount = 2;
12738*b7893ccfSSadaf Ebrahimi
12739*b7893ccfSSadaf Ebrahimi // Execute defragmentation rounds (the main part).
12740*b7893ccfSSadaf Ebrahimi VkResult result = VK_SUCCESS;
12741*b7893ccfSSadaf Ebrahimi for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12742*b7893ccfSSadaf Ebrahimi {
12743*b7893ccfSSadaf Ebrahimi result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12744*b7893ccfSSadaf Ebrahimi }
12745*b7893ccfSSadaf Ebrahimi
12746*b7893ccfSSadaf Ebrahimi return result;
12747*b7893ccfSSadaf Ebrahimi }
12748*b7893ccfSSadaf Ebrahimi
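// A move is worthwhile only if it goes strictly "down": to a block with a lower index, or
// within the same block to a lower offset. For example, (block 2, offset 256) -> (block 0,
// any offset) is accepted, while (block 2, offset 256) -> (block 2, offset 512) is rejected.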
12749*b7893ccfSSadaf Ebrahimi bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12750*b7893ccfSSadaf Ebrahimi size_t dstBlockIndex, VkDeviceSize dstOffset,
12751*b7893ccfSSadaf Ebrahimi size_t srcBlockIndex, VkDeviceSize srcOffset)
12752*b7893ccfSSadaf Ebrahimi {
12753*b7893ccfSSadaf Ebrahimi if(dstBlockIndex < srcBlockIndex)
12754*b7893ccfSSadaf Ebrahimi {
12755*b7893ccfSSadaf Ebrahimi return true;
12756*b7893ccfSSadaf Ebrahimi }
12757*b7893ccfSSadaf Ebrahimi if(dstBlockIndex > srcBlockIndex)
12758*b7893ccfSSadaf Ebrahimi {
12759*b7893ccfSSadaf Ebrahimi return false;
12760*b7893ccfSSadaf Ebrahimi }
12761*b7893ccfSSadaf Ebrahimi if(dstOffset < srcOffset)
12762*b7893ccfSSadaf Ebrahimi {
12763*b7893ccfSSadaf Ebrahimi return true;
12764*b7893ccfSSadaf Ebrahimi }
12765*b7893ccfSSadaf Ebrahimi return false;
12766*b7893ccfSSadaf Ebrahimi }
12767*b7893ccfSSadaf Ebrahimi
12768*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
12769*b7893ccfSSadaf Ebrahimi // VmaDefragmentationAlgorithm_Fast
12770*b7893ccfSSadaf Ebrahimi
12771*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12772*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
12773*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
12774*b7893ccfSSadaf Ebrahimi uint32_t currentFrameIndex,
12775*b7893ccfSSadaf Ebrahimi bool overlappingMoveSupported) :
12776*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12777*b7893ccfSSadaf Ebrahimi m_OverlappingMoveSupported(overlappingMoveSupported),
12778*b7893ccfSSadaf Ebrahimi m_AllocationCount(0),
12779*b7893ccfSSadaf Ebrahimi m_AllAllocations(false),
12780*b7893ccfSSadaf Ebrahimi m_BytesMoved(0),
12781*b7893ccfSSadaf Ebrahimi m_AllocationsMoved(0),
12782*b7893ccfSSadaf Ebrahimi m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12783*b7893ccfSSadaf Ebrahimi {
12784*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12785*b7893ccfSSadaf Ebrahimi
12786*b7893ccfSSadaf Ebrahimi }
12787*b7893ccfSSadaf Ebrahimi
12788*b7893ccfSSadaf Ebrahimi VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12789*b7893ccfSSadaf Ebrahimi {
12790*b7893ccfSSadaf Ebrahimi }
12791*b7893ccfSSadaf Ebrahimi
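// The fast algorithm is a single compaction sweep. Blocks are ordered by ascending free space
// (fullest first, i.e. "most destination"), then every allocation is slid towards the front of
// that order, either into a gap remembered in FreeSpaceDatabase or right after the previously
// placed allocation. Metadata is rewritten wholesale via PreprocessMetadata() /
// PostprocessMetadata() instead of per-move bookkeeping.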
12792*b7893ccfSSadaf Ebrahimi VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12793*b7893ccfSSadaf Ebrahimi VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12794*b7893ccfSSadaf Ebrahimi VkDeviceSize maxBytesToMove,
12795*b7893ccfSSadaf Ebrahimi uint32_t maxAllocationsToMove)
12796*b7893ccfSSadaf Ebrahimi {
12797*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12798*b7893ccfSSadaf Ebrahimi
12799*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_pBlockVector->GetBlockCount();
12800*b7893ccfSSadaf Ebrahimi if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12801*b7893ccfSSadaf Ebrahimi {
12802*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
12803*b7893ccfSSadaf Ebrahimi }
12804*b7893ccfSSadaf Ebrahimi
12805*b7893ccfSSadaf Ebrahimi PreprocessMetadata();
12806*b7893ccfSSadaf Ebrahimi
12807*b7893ccfSSadaf Ebrahimi     // Sort blocks in order from most "destination" to most "source".
12808*b7893ccfSSadaf Ebrahimi
12809*b7893ccfSSadaf Ebrahimi m_BlockInfos.resize(blockCount);
12810*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < blockCount; ++i)
12811*b7893ccfSSadaf Ebrahimi {
12812*b7893ccfSSadaf Ebrahimi m_BlockInfos[i].origBlockIndex = i;
12813*b7893ccfSSadaf Ebrahimi }
12814*b7893ccfSSadaf Ebrahimi
12815*b7893ccfSSadaf Ebrahimi VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12816*b7893ccfSSadaf Ebrahimi return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12817*b7893ccfSSadaf Ebrahimi m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12818*b7893ccfSSadaf Ebrahimi });
12819*b7893ccfSSadaf Ebrahimi
12820*b7893ccfSSadaf Ebrahimi // THE MAIN ALGORITHM
12821*b7893ccfSSadaf Ebrahimi
12822*b7893ccfSSadaf Ebrahimi FreeSpaceDatabase freeSpaceDb;
12823*b7893ccfSSadaf Ebrahimi
12824*b7893ccfSSadaf Ebrahimi size_t dstBlockInfoIndex = 0;
12825*b7893ccfSSadaf Ebrahimi size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12826*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12827*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12828*b7893ccfSSadaf Ebrahimi VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12829*b7893ccfSSadaf Ebrahimi VkDeviceSize dstOffset = 0;
12830*b7893ccfSSadaf Ebrahimi
12831*b7893ccfSSadaf Ebrahimi bool end = false;
12832*b7893ccfSSadaf Ebrahimi for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12833*b7893ccfSSadaf Ebrahimi {
12834*b7893ccfSSadaf Ebrahimi const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12835*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12836*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12837*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12838*b7893ccfSSadaf Ebrahimi !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12839*b7893ccfSSadaf Ebrahimi {
12840*b7893ccfSSadaf Ebrahimi VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12841*b7893ccfSSadaf Ebrahimi const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12842*b7893ccfSSadaf Ebrahimi const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12843*b7893ccfSSadaf Ebrahimi if(m_AllocationsMoved == maxAllocationsToMove ||
12844*b7893ccfSSadaf Ebrahimi m_BytesMoved + srcAllocSize > maxBytesToMove)
12845*b7893ccfSSadaf Ebrahimi {
12846*b7893ccfSSadaf Ebrahimi end = true;
12847*b7893ccfSSadaf Ebrahimi break;
12848*b7893ccfSSadaf Ebrahimi }
12849*b7893ccfSSadaf Ebrahimi const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12850*b7893ccfSSadaf Ebrahimi
12851*b7893ccfSSadaf Ebrahimi // Try to place it in one of free spaces from the database.
12852*b7893ccfSSadaf Ebrahimi size_t freeSpaceInfoIndex;
12853*b7893ccfSSadaf Ebrahimi VkDeviceSize dstAllocOffset;
12854*b7893ccfSSadaf Ebrahimi if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12855*b7893ccfSSadaf Ebrahimi freeSpaceInfoIndex, dstAllocOffset))
12856*b7893ccfSSadaf Ebrahimi {
12857*b7893ccfSSadaf Ebrahimi size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12858*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12859*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12860*b7893ccfSSadaf Ebrahimi VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12861*b7893ccfSSadaf Ebrahimi
12862*b7893ccfSSadaf Ebrahimi // Same block
12863*b7893ccfSSadaf Ebrahimi if(freeSpaceInfoIndex == srcBlockInfoIndex)
12864*b7893ccfSSadaf Ebrahimi {
12865*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12866*b7893ccfSSadaf Ebrahimi
12867*b7893ccfSSadaf Ebrahimi // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12868*b7893ccfSSadaf Ebrahimi
12869*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = *srcSuballocIt;
12870*b7893ccfSSadaf Ebrahimi suballoc.offset = dstAllocOffset;
12871*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12872*b7893ccfSSadaf Ebrahimi m_BytesMoved += srcAllocSize;
12873*b7893ccfSSadaf Ebrahimi ++m_AllocationsMoved;
12874*b7893ccfSSadaf Ebrahimi
12875*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12876*b7893ccfSSadaf Ebrahimi ++nextSuballocIt;
12877*b7893ccfSSadaf Ebrahimi pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12878*b7893ccfSSadaf Ebrahimi srcSuballocIt = nextSuballocIt;
12879*b7893ccfSSadaf Ebrahimi
12880*b7893ccfSSadaf Ebrahimi InsertSuballoc(pFreeSpaceMetadata, suballoc);
12881*b7893ccfSSadaf Ebrahimi
12882*b7893ccfSSadaf Ebrahimi VmaDefragmentationMove move = {
12883*b7893ccfSSadaf Ebrahimi srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12884*b7893ccfSSadaf Ebrahimi srcAllocOffset, dstAllocOffset,
12885*b7893ccfSSadaf Ebrahimi srcAllocSize };
12886*b7893ccfSSadaf Ebrahimi moves.push_back(move);
12887*b7893ccfSSadaf Ebrahimi }
12888*b7893ccfSSadaf Ebrahimi // Different block
12889*b7893ccfSSadaf Ebrahimi else
12890*b7893ccfSSadaf Ebrahimi {
12891*b7893ccfSSadaf Ebrahimi // MOVE OPTION 2: Move the allocation to a different block.
12892*b7893ccfSSadaf Ebrahimi
12893*b7893ccfSSadaf Ebrahimi VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12894*b7893ccfSSadaf Ebrahimi
12895*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = *srcSuballocIt;
12896*b7893ccfSSadaf Ebrahimi suballoc.offset = dstAllocOffset;
12897*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12898*b7893ccfSSadaf Ebrahimi m_BytesMoved += srcAllocSize;
12899*b7893ccfSSadaf Ebrahimi ++m_AllocationsMoved;
12900*b7893ccfSSadaf Ebrahimi
12901*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12902*b7893ccfSSadaf Ebrahimi ++nextSuballocIt;
12903*b7893ccfSSadaf Ebrahimi pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12904*b7893ccfSSadaf Ebrahimi srcSuballocIt = nextSuballocIt;
12905*b7893ccfSSadaf Ebrahimi
12906*b7893ccfSSadaf Ebrahimi InsertSuballoc(pFreeSpaceMetadata, suballoc);
12907*b7893ccfSSadaf Ebrahimi
12908*b7893ccfSSadaf Ebrahimi VmaDefragmentationMove move = {
12909*b7893ccfSSadaf Ebrahimi srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12910*b7893ccfSSadaf Ebrahimi srcAllocOffset, dstAllocOffset,
12911*b7893ccfSSadaf Ebrahimi srcAllocSize };
12912*b7893ccfSSadaf Ebrahimi moves.push_back(move);
12913*b7893ccfSSadaf Ebrahimi }
12914*b7893ccfSSadaf Ebrahimi }
12915*b7893ccfSSadaf Ebrahimi else
12916*b7893ccfSSadaf Ebrahimi {
12917*b7893ccfSSadaf Ebrahimi dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12918*b7893ccfSSadaf Ebrahimi
12919*b7893ccfSSadaf Ebrahimi                 // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12920*b7893ccfSSadaf Ebrahimi while(dstBlockInfoIndex < srcBlockInfoIndex &&
12921*b7893ccfSSadaf Ebrahimi dstAllocOffset + srcAllocSize > dstBlockSize)
12922*b7893ccfSSadaf Ebrahimi {
12923*b7893ccfSSadaf Ebrahimi // But before that, register remaining free space at the end of dst block.
12924*b7893ccfSSadaf Ebrahimi freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12925*b7893ccfSSadaf Ebrahimi
12926*b7893ccfSSadaf Ebrahimi ++dstBlockInfoIndex;
12927*b7893ccfSSadaf Ebrahimi dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12928*b7893ccfSSadaf Ebrahimi pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12929*b7893ccfSSadaf Ebrahimi pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12930*b7893ccfSSadaf Ebrahimi dstBlockSize = pDstMetadata->GetSize();
12931*b7893ccfSSadaf Ebrahimi dstOffset = 0;
12932*b7893ccfSSadaf Ebrahimi dstAllocOffset = 0;
12933*b7893ccfSSadaf Ebrahimi }
12934*b7893ccfSSadaf Ebrahimi
12935*b7893ccfSSadaf Ebrahimi // Same block
12936*b7893ccfSSadaf Ebrahimi if(dstBlockInfoIndex == srcBlockInfoIndex)
12937*b7893ccfSSadaf Ebrahimi {
12938*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12939*b7893ccfSSadaf Ebrahimi
12940*b7893ccfSSadaf Ebrahimi const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12941*b7893ccfSSadaf Ebrahimi
12942*b7893ccfSSadaf Ebrahimi bool skipOver = overlap;
12943*b7893ccfSSadaf Ebrahimi if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12944*b7893ccfSSadaf Ebrahimi {
12945*b7893ccfSSadaf Ebrahimi                         // If the destination and source ranges overlap, skip the move when it
12946*b7893ccfSSadaf Ebrahimi                         // would shift the allocation by less than 1/64 of its size.
12947*b7893ccfSSadaf Ebrahimi skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12948*b7893ccfSSadaf Ebrahimi }
12949*b7893ccfSSadaf Ebrahimi
12950*b7893ccfSSadaf Ebrahimi if(skipOver)
12951*b7893ccfSSadaf Ebrahimi {
12952*b7893ccfSSadaf Ebrahimi freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12953*b7893ccfSSadaf Ebrahimi
12954*b7893ccfSSadaf Ebrahimi dstOffset = srcAllocOffset + srcAllocSize;
12955*b7893ccfSSadaf Ebrahimi ++srcSuballocIt;
12956*b7893ccfSSadaf Ebrahimi }
12957*b7893ccfSSadaf Ebrahimi // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12958*b7893ccfSSadaf Ebrahimi else
12959*b7893ccfSSadaf Ebrahimi {
12960*b7893ccfSSadaf Ebrahimi srcSuballocIt->offset = dstAllocOffset;
12961*b7893ccfSSadaf Ebrahimi srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12962*b7893ccfSSadaf Ebrahimi dstOffset = dstAllocOffset + srcAllocSize;
12963*b7893ccfSSadaf Ebrahimi m_BytesMoved += srcAllocSize;
12964*b7893ccfSSadaf Ebrahimi ++m_AllocationsMoved;
12965*b7893ccfSSadaf Ebrahimi ++srcSuballocIt;
12966*b7893ccfSSadaf Ebrahimi VmaDefragmentationMove move = {
12967*b7893ccfSSadaf Ebrahimi srcOrigBlockIndex, dstOrigBlockIndex,
12968*b7893ccfSSadaf Ebrahimi srcAllocOffset, dstAllocOffset,
12969*b7893ccfSSadaf Ebrahimi srcAllocSize };
12970*b7893ccfSSadaf Ebrahimi moves.push_back(move);
12971*b7893ccfSSadaf Ebrahimi }
12972*b7893ccfSSadaf Ebrahimi }
12973*b7893ccfSSadaf Ebrahimi // Different block
12974*b7893ccfSSadaf Ebrahimi else
12975*b7893ccfSSadaf Ebrahimi {
12976*b7893ccfSSadaf Ebrahimi // MOVE OPTION 2: Move the allocation to a different block.
12977*b7893ccfSSadaf Ebrahimi
12978*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12979*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12980*b7893ccfSSadaf Ebrahimi
12981*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = *srcSuballocIt;
12982*b7893ccfSSadaf Ebrahimi suballoc.offset = dstAllocOffset;
12983*b7893ccfSSadaf Ebrahimi suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12984*b7893ccfSSadaf Ebrahimi dstOffset = dstAllocOffset + srcAllocSize;
12985*b7893ccfSSadaf Ebrahimi m_BytesMoved += srcAllocSize;
12986*b7893ccfSSadaf Ebrahimi ++m_AllocationsMoved;
12987*b7893ccfSSadaf Ebrahimi
12988*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12989*b7893ccfSSadaf Ebrahimi ++nextSuballocIt;
12990*b7893ccfSSadaf Ebrahimi pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12991*b7893ccfSSadaf Ebrahimi srcSuballocIt = nextSuballocIt;
12992*b7893ccfSSadaf Ebrahimi
12993*b7893ccfSSadaf Ebrahimi pDstMetadata->m_Suballocations.push_back(suballoc);
12994*b7893ccfSSadaf Ebrahimi
12995*b7893ccfSSadaf Ebrahimi VmaDefragmentationMove move = {
12996*b7893ccfSSadaf Ebrahimi srcOrigBlockIndex, dstOrigBlockIndex,
12997*b7893ccfSSadaf Ebrahimi srcAllocOffset, dstAllocOffset,
12998*b7893ccfSSadaf Ebrahimi srcAllocSize };
12999*b7893ccfSSadaf Ebrahimi moves.push_back(move);
13000*b7893ccfSSadaf Ebrahimi }
13001*b7893ccfSSadaf Ebrahimi }
13002*b7893ccfSSadaf Ebrahimi }
13003*b7893ccfSSadaf Ebrahimi }
13004*b7893ccfSSadaf Ebrahimi
13005*b7893ccfSSadaf Ebrahimi m_BlockInfos.clear();
13006*b7893ccfSSadaf Ebrahimi
13007*b7893ccfSSadaf Ebrahimi PostprocessMetadata();
13008*b7893ccfSSadaf Ebrahimi
13009*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
13010*b7893ccfSSadaf Ebrahimi }
13011*b7893ccfSSadaf Ebrahimi
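// PreprocessMetadata() strips all VMA_SUBALLOCATION_TYPE_FREE entries and resets the
// free-space bookkeeping so that Defragment() can treat each block's suballocation list as a
// packed sequence of real allocations. PostprocessMetadata() below re-derives the free entries,
// counters, and the by-size index from the final offsets.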
13012*b7893ccfSSadaf Ebrahimi void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13013*b7893ccfSSadaf Ebrahimi {
13014*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_pBlockVector->GetBlockCount();
13015*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13016*b7893ccfSSadaf Ebrahimi {
13017*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* const pMetadata =
13018*b7893ccfSSadaf Ebrahimi (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13019*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeCount = 0;
13020*b7893ccfSSadaf Ebrahimi pMetadata->m_SumFreeSize = pMetadata->GetSize();
13021*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeSuballocationsBySize.clear();
13022*b7893ccfSSadaf Ebrahimi for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13023*b7893ccfSSadaf Ebrahimi it != pMetadata->m_Suballocations.end(); )
13024*b7893ccfSSadaf Ebrahimi {
13025*b7893ccfSSadaf Ebrahimi if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13026*b7893ccfSSadaf Ebrahimi {
13027*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator nextIt = it;
13028*b7893ccfSSadaf Ebrahimi ++nextIt;
13029*b7893ccfSSadaf Ebrahimi pMetadata->m_Suballocations.erase(it);
13030*b7893ccfSSadaf Ebrahimi it = nextIt;
13031*b7893ccfSSadaf Ebrahimi }
13032*b7893ccfSSadaf Ebrahimi else
13033*b7893ccfSSadaf Ebrahimi {
13034*b7893ccfSSadaf Ebrahimi ++it;
13035*b7893ccfSSadaf Ebrahimi }
13036*b7893ccfSSadaf Ebrahimi }
13037*b7893ccfSSadaf Ebrahimi }
13038*b7893ccfSSadaf Ebrahimi }
13039*b7893ccfSSadaf Ebrahimi
13040*b7893ccfSSadaf Ebrahimi void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13041*b7893ccfSSadaf Ebrahimi {
13042*b7893ccfSSadaf Ebrahimi const size_t blockCount = m_pBlockVector->GetBlockCount();
13043*b7893ccfSSadaf Ebrahimi for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13044*b7893ccfSSadaf Ebrahimi {
13045*b7893ccfSSadaf Ebrahimi VmaBlockMetadata_Generic* const pMetadata =
13046*b7893ccfSSadaf Ebrahimi (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13047*b7893ccfSSadaf Ebrahimi const VkDeviceSize blockSize = pMetadata->GetSize();
13048*b7893ccfSSadaf Ebrahimi
13049*b7893ccfSSadaf Ebrahimi // No allocations in this block - entire area is free.
13050*b7893ccfSSadaf Ebrahimi if(pMetadata->m_Suballocations.empty())
13051*b7893ccfSSadaf Ebrahimi {
13052*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeCount = 1;
13053*b7893ccfSSadaf Ebrahimi //pMetadata->m_SumFreeSize is already set to blockSize.
13054*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = {
13055*b7893ccfSSadaf Ebrahimi 0, // offset
13056*b7893ccfSSadaf Ebrahimi blockSize, // size
13057*b7893ccfSSadaf Ebrahimi VMA_NULL, // hAllocation
13058*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_FREE };
13059*b7893ccfSSadaf Ebrahimi pMetadata->m_Suballocations.push_back(suballoc);
13060*b7893ccfSSadaf Ebrahimi pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13061*b7893ccfSSadaf Ebrahimi }
13062*b7893ccfSSadaf Ebrahimi // There are some allocations in this block.
13063*b7893ccfSSadaf Ebrahimi else
13064*b7893ccfSSadaf Ebrahimi {
13065*b7893ccfSSadaf Ebrahimi VkDeviceSize offset = 0;
13066*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator it;
13067*b7893ccfSSadaf Ebrahimi for(it = pMetadata->m_Suballocations.begin();
13068*b7893ccfSSadaf Ebrahimi it != pMetadata->m_Suballocations.end();
13069*b7893ccfSSadaf Ebrahimi ++it)
13070*b7893ccfSSadaf Ebrahimi {
13071*b7893ccfSSadaf Ebrahimi VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13072*b7893ccfSSadaf Ebrahimi VMA_ASSERT(it->offset >= offset);
13073*b7893ccfSSadaf Ebrahimi
13074*b7893ccfSSadaf Ebrahimi // Need to insert preceding free space.
13075*b7893ccfSSadaf Ebrahimi if(it->offset > offset)
13076*b7893ccfSSadaf Ebrahimi {
13077*b7893ccfSSadaf Ebrahimi ++pMetadata->m_FreeCount;
13078*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSize = it->offset - offset;
13079*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = {
13080*b7893ccfSSadaf Ebrahimi offset, // offset
13081*b7893ccfSSadaf Ebrahimi freeSize, // size
13082*b7893ccfSSadaf Ebrahimi VMA_NULL, // hAllocation
13083*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_FREE };
13084*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13085*b7893ccfSSadaf Ebrahimi if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13086*b7893ccfSSadaf Ebrahimi {
13087*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13088*b7893ccfSSadaf Ebrahimi }
13089*b7893ccfSSadaf Ebrahimi }
13090*b7893ccfSSadaf Ebrahimi
13091*b7893ccfSSadaf Ebrahimi pMetadata->m_SumFreeSize -= it->size;
13092*b7893ccfSSadaf Ebrahimi offset = it->offset + it->size;
13093*b7893ccfSSadaf Ebrahimi }
13094*b7893ccfSSadaf Ebrahimi
13095*b7893ccfSSadaf Ebrahimi // Need to insert trailing free space.
13096*b7893ccfSSadaf Ebrahimi if(offset < blockSize)
13097*b7893ccfSSadaf Ebrahimi {
13098*b7893ccfSSadaf Ebrahimi ++pMetadata->m_FreeCount;
13099*b7893ccfSSadaf Ebrahimi const VkDeviceSize freeSize = blockSize - offset;
13100*b7893ccfSSadaf Ebrahimi VmaSuballocation suballoc = {
13101*b7893ccfSSadaf Ebrahimi offset, // offset
13102*b7893ccfSSadaf Ebrahimi freeSize, // size
13103*b7893ccfSSadaf Ebrahimi VMA_NULL, // hAllocation
13104*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_FREE };
13105*b7893ccfSSadaf Ebrahimi VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13106*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13107*b7893ccfSSadaf Ebrahimi if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13108*b7893ccfSSadaf Ebrahimi {
13109*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13110*b7893ccfSSadaf Ebrahimi }
13111*b7893ccfSSadaf Ebrahimi }
13112*b7893ccfSSadaf Ebrahimi
13113*b7893ccfSSadaf Ebrahimi VMA_SORT(
13114*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeSuballocationsBySize.begin(),
13115*b7893ccfSSadaf Ebrahimi pMetadata->m_FreeSuballocationsBySize.end(),
13116*b7893ccfSSadaf Ebrahimi VmaSuballocationItemSizeLess());
13117*b7893ccfSSadaf Ebrahimi }
13118*b7893ccfSSadaf Ebrahimi
13119*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(pMetadata->Validate());
13120*b7893ccfSSadaf Ebrahimi }
13121*b7893ccfSSadaf Ebrahimi }
13122*b7893ccfSSadaf Ebrahimi
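// Inserts 'suballoc' into the block's list while keeping it sorted by offset, using the linear
// search noted in the TODO below.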
13123*b7893ccfSSadaf Ebrahimi void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13124*b7893ccfSSadaf Ebrahimi {
13125*b7893ccfSSadaf Ebrahimi // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13126*b7893ccfSSadaf Ebrahimi VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13127*b7893ccfSSadaf Ebrahimi     while(it != pMetadata->m_Suballocations.end() &&
13128*b7893ccfSSadaf Ebrahimi         it->offset < suballoc.offset)
13129*b7893ccfSSadaf Ebrahimi     {
13130*b7893ccfSSadaf Ebrahimi         ++it;
13131*b7893ccfSSadaf Ebrahimi     }
13134*b7893ccfSSadaf Ebrahimi pMetadata->m_Suballocations.insert(it, suballoc);
13135*b7893ccfSSadaf Ebrahimi }
13136*b7893ccfSSadaf Ebrahimi
13137*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
13138*b7893ccfSSadaf Ebrahimi // VmaBlockVectorDefragmentationContext
13139*b7893ccfSSadaf Ebrahimi
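// Per-block-vector defragmentation state: remembers which allocations the user submitted
// (or that all of them are eligible), owns the chosen algorithm instance, and keeps the result
// code, temporary buffers, and mutex state between VmaBlockVector::Defragment() and
// DefragmentationEnd().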
13140*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13141*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
13142*b7893ccfSSadaf Ebrahimi VmaPool hCustomPool,
13143*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector,
13144*b7893ccfSSadaf Ebrahimi uint32_t currFrameIndex,
13145*b7893ccfSSadaf Ebrahimi uint32_t algorithmFlags) :
13146*b7893ccfSSadaf Ebrahimi res(VK_SUCCESS),
13147*b7893ccfSSadaf Ebrahimi mutexLocked(false),
13148*b7893ccfSSadaf Ebrahimi blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13149*b7893ccfSSadaf Ebrahimi m_hAllocator(hAllocator),
13150*b7893ccfSSadaf Ebrahimi m_hCustomPool(hCustomPool),
13151*b7893ccfSSadaf Ebrahimi m_pBlockVector(pBlockVector),
13152*b7893ccfSSadaf Ebrahimi m_CurrFrameIndex(currFrameIndex),
13153*b7893ccfSSadaf Ebrahimi //m_AlgorithmFlags(algorithmFlags),
13154*b7893ccfSSadaf Ebrahimi m_pAlgorithm(VMA_NULL),
13155*b7893ccfSSadaf Ebrahimi m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13156*b7893ccfSSadaf Ebrahimi m_AllAllocations(false)
13157*b7893ccfSSadaf Ebrahimi {
13158*b7893ccfSSadaf Ebrahimi }
13159*b7893ccfSSadaf Ebrahimi
13160*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13161*b7893ccfSSadaf Ebrahimi {
13162*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, m_pAlgorithm);
13163*b7893ccfSSadaf Ebrahimi }
13164*b7893ccfSSadaf Ebrahimi
13165*b7893ccfSSadaf Ebrahimi void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13166*b7893ccfSSadaf Ebrahimi {
13167*b7893ccfSSadaf Ebrahimi AllocInfo info = { hAlloc, pChanged };
13168*b7893ccfSSadaf Ebrahimi m_Allocations.push_back(info);
13169*b7893ccfSSadaf Ebrahimi }
13170*b7893ccfSSadaf Ebrahimi
13171*b7893ccfSSadaf Ebrahimi void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13172*b7893ccfSSadaf Ebrahimi {
13173*b7893ccfSSadaf Ebrahimi const bool allAllocations = m_AllAllocations ||
13174*b7893ccfSSadaf Ebrahimi m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13175*b7893ccfSSadaf Ebrahimi
13176*b7893ccfSSadaf Ebrahimi /********************************
13177*b7893ccfSSadaf Ebrahimi HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13178*b7893ccfSSadaf Ebrahimi ********************************/
13179*b7893ccfSSadaf Ebrahimi
13180*b7893ccfSSadaf Ebrahimi /*
13181*b7893ccfSSadaf Ebrahimi Fast algorithm is supported only when certain criteria are met:
13182*b7893ccfSSadaf Ebrahimi - VMA_DEBUG_MARGIN is 0.
13183*b7893ccfSSadaf Ebrahimi - All allocations in this block vector are moveable.
13184*b7893ccfSSadaf Ebrahimi - There is no possibility of image/buffer granularity conflict.
13185*b7893ccfSSadaf Ebrahimi */
13186*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_MARGIN == 0 &&
13187*b7893ccfSSadaf Ebrahimi allAllocations &&
13188*b7893ccfSSadaf Ebrahimi !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13189*b7893ccfSSadaf Ebrahimi {
13190*b7893ccfSSadaf Ebrahimi m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13191*b7893ccfSSadaf Ebrahimi m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13192*b7893ccfSSadaf Ebrahimi }
13193*b7893ccfSSadaf Ebrahimi else
13194*b7893ccfSSadaf Ebrahimi {
13195*b7893ccfSSadaf Ebrahimi m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13196*b7893ccfSSadaf Ebrahimi m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13197*b7893ccfSSadaf Ebrahimi }
13198*b7893ccfSSadaf Ebrahimi
13199*b7893ccfSSadaf Ebrahimi if(allAllocations)
13200*b7893ccfSSadaf Ebrahimi {
13201*b7893ccfSSadaf Ebrahimi m_pAlgorithm->AddAll();
13202*b7893ccfSSadaf Ebrahimi }
13203*b7893ccfSSadaf Ebrahimi else
13204*b7893ccfSSadaf Ebrahimi {
13205*b7893ccfSSadaf Ebrahimi for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13206*b7893ccfSSadaf Ebrahimi {
13207*b7893ccfSSadaf Ebrahimi m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13208*b7893ccfSSadaf Ebrahimi }
13209*b7893ccfSSadaf Ebrahimi }
13210*b7893ccfSSadaf Ebrahimi }
13211*b7893ccfSSadaf Ebrahimi
13212*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
13213*b7893ccfSSadaf Ebrahimi // VmaDefragmentationContext
13214*b7893ccfSSadaf Ebrahimi
13215*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13216*b7893ccfSSadaf Ebrahimi VmaAllocator hAllocator,
13217*b7893ccfSSadaf Ebrahimi uint32_t currFrameIndex,
13218*b7893ccfSSadaf Ebrahimi uint32_t flags,
13219*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats) :
13220*b7893ccfSSadaf Ebrahimi m_hAllocator(hAllocator),
13221*b7893ccfSSadaf Ebrahimi m_CurrFrameIndex(currFrameIndex),
13222*b7893ccfSSadaf Ebrahimi m_Flags(flags),
13223*b7893ccfSSadaf Ebrahimi m_pStats(pStats),
13224*b7893ccfSSadaf Ebrahimi m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13225*b7893ccfSSadaf Ebrahimi {
13226*b7893ccfSSadaf Ebrahimi memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13227*b7893ccfSSadaf Ebrahimi }
13228*b7893ccfSSadaf Ebrahimi
13229*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13230*b7893ccfSSadaf Ebrahimi {
13231*b7893ccfSSadaf Ebrahimi for(size_t i = m_CustomPoolContexts.size(); i--; )
13232*b7893ccfSSadaf Ebrahimi {
13233*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13234*b7893ccfSSadaf Ebrahimi pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13235*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, pBlockVectorCtx);
13236*b7893ccfSSadaf Ebrahimi }
13237*b7893ccfSSadaf Ebrahimi for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13238*b7893ccfSSadaf Ebrahimi {
13239*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13240*b7893ccfSSadaf Ebrahimi if(pBlockVectorCtx)
13241*b7893ccfSSadaf Ebrahimi {
13242*b7893ccfSSadaf Ebrahimi pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13243*b7893ccfSSadaf Ebrahimi vma_delete(m_hAllocator, pBlockVectorCtx);
13244*b7893ccfSSadaf Ebrahimi }
13245*b7893ccfSSadaf Ebrahimi }
13246*b7893ccfSSadaf Ebrahimi }
13247*b7893ccfSSadaf Ebrahimi
13248*b7893ccfSSadaf Ebrahimi void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13249*b7893ccfSSadaf Ebrahimi {
13250*b7893ccfSSadaf Ebrahimi for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13251*b7893ccfSSadaf Ebrahimi {
13252*b7893ccfSSadaf Ebrahimi VmaPool pool = pPools[poolIndex];
13253*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pool);
13254*b7893ccfSSadaf Ebrahimi // Pools with algorithm other than default are not defragmented.
13255*b7893ccfSSadaf Ebrahimi if(pool->m_BlockVector.GetAlgorithm() == 0)
13256*b7893ccfSSadaf Ebrahimi {
13257*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13258*b7893ccfSSadaf Ebrahimi
13259*b7893ccfSSadaf Ebrahimi for(size_t i = m_CustomPoolContexts.size(); i--; )
13260*b7893ccfSSadaf Ebrahimi {
13261*b7893ccfSSadaf Ebrahimi if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13262*b7893ccfSSadaf Ebrahimi {
13263*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13264*b7893ccfSSadaf Ebrahimi break;
13265*b7893ccfSSadaf Ebrahimi }
13266*b7893ccfSSadaf Ebrahimi }
13267*b7893ccfSSadaf Ebrahimi
13268*b7893ccfSSadaf Ebrahimi if(!pBlockVectorDefragCtx)
13269*b7893ccfSSadaf Ebrahimi {
13270*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13271*b7893ccfSSadaf Ebrahimi m_hAllocator,
13272*b7893ccfSSadaf Ebrahimi pool,
13273*b7893ccfSSadaf Ebrahimi &pool->m_BlockVector,
13274*b7893ccfSSadaf Ebrahimi m_CurrFrameIndex,
13275*b7893ccfSSadaf Ebrahimi m_Flags);
13276*b7893ccfSSadaf Ebrahimi m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13277*b7893ccfSSadaf Ebrahimi }
13278*b7893ccfSSadaf Ebrahimi
13279*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx->AddAll();
13280*b7893ccfSSadaf Ebrahimi }
13281*b7893ccfSSadaf Ebrahimi }
13282*b7893ccfSSadaf Ebrahimi }
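
// Illustrative sketch, not part of the original source: whole custom pools reach
// AddPools() above through VmaDefragmentationInfo2::pPools when the caller begins
// defragmentation. myPool below is an assumed caller-side handle:
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.poolCount = 1;
//   defragInfo.pPools = &myPool; // a VmaPool created with the default algorithm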
13283*b7893ccfSSadaf Ebrahimi
13284*b7893ccfSSadaf Ebrahimi void VmaDefragmentationContext_T::AddAllocations(
13285*b7893ccfSSadaf Ebrahimi uint32_t allocationCount,
13286*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
13287*b7893ccfSSadaf Ebrahimi VkBool32* pAllocationsChanged)
13288*b7893ccfSSadaf Ebrahimi {
13289*b7893ccfSSadaf Ebrahimi // Dispatch pAllocations among defragmentators. Create them when necessary.
13290*b7893ccfSSadaf Ebrahimi for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13291*b7893ccfSSadaf Ebrahimi {
13292*b7893ccfSSadaf Ebrahimi const VmaAllocation hAlloc = pAllocations[allocIndex];
13293*b7893ccfSSadaf Ebrahimi VMA_ASSERT(hAlloc);
13294*b7893ccfSSadaf Ebrahimi // DedicatedAlloc cannot be defragmented.
13295*b7893ccfSSadaf Ebrahimi if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13296*b7893ccfSSadaf Ebrahimi // Lost allocation cannot be defragmented.
13297*b7893ccfSSadaf Ebrahimi (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13298*b7893ccfSSadaf Ebrahimi {
13299*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13300*b7893ccfSSadaf Ebrahimi
13301*b7893ccfSSadaf Ebrahimi const VmaPool hAllocPool = hAlloc->GetPool();
13302*b7893ccfSSadaf Ebrahimi // This allocation belongs to custom pool.
13303*b7893ccfSSadaf Ebrahimi if(hAllocPool != VK_NULL_HANDLE)
13304*b7893ccfSSadaf Ebrahimi {
13305*b7893ccfSSadaf Ebrahimi // Pools with algorithm other than default are not defragmented.
13306*b7893ccfSSadaf Ebrahimi if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13307*b7893ccfSSadaf Ebrahimi {
13308*b7893ccfSSadaf Ebrahimi for(size_t i = m_CustomPoolContexts.size(); i--; )
13309*b7893ccfSSadaf Ebrahimi {
13310*b7893ccfSSadaf Ebrahimi if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13311*b7893ccfSSadaf Ebrahimi {
13312*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13313*b7893ccfSSadaf Ebrahimi break;
13314*b7893ccfSSadaf Ebrahimi }
13315*b7893ccfSSadaf Ebrahimi }
13316*b7893ccfSSadaf Ebrahimi if(!pBlockVectorDefragCtx)
13317*b7893ccfSSadaf Ebrahimi {
13318*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13319*b7893ccfSSadaf Ebrahimi m_hAllocator,
13320*b7893ccfSSadaf Ebrahimi hAllocPool,
13321*b7893ccfSSadaf Ebrahimi &hAllocPool->m_BlockVector,
13322*b7893ccfSSadaf Ebrahimi m_CurrFrameIndex,
13323*b7893ccfSSadaf Ebrahimi m_Flags);
13324*b7893ccfSSadaf Ebrahimi m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13325*b7893ccfSSadaf Ebrahimi }
13326*b7893ccfSSadaf Ebrahimi }
13327*b7893ccfSSadaf Ebrahimi }
13328*b7893ccfSSadaf Ebrahimi // This allocation belongs to default pool.
13329*b7893ccfSSadaf Ebrahimi else
13330*b7893ccfSSadaf Ebrahimi {
13331*b7893ccfSSadaf Ebrahimi const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13332*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13333*b7893ccfSSadaf Ebrahimi if(!pBlockVectorDefragCtx)
13334*b7893ccfSSadaf Ebrahimi {
13335*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13336*b7893ccfSSadaf Ebrahimi m_hAllocator,
13337*b7893ccfSSadaf Ebrahimi VMA_NULL, // hCustomPool
13338*b7893ccfSSadaf Ebrahimi m_hAllocator->m_pBlockVectors[memTypeIndex],
13339*b7893ccfSSadaf Ebrahimi m_CurrFrameIndex,
13340*b7893ccfSSadaf Ebrahimi m_Flags);
13341*b7893ccfSSadaf Ebrahimi m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13342*b7893ccfSSadaf Ebrahimi }
13343*b7893ccfSSadaf Ebrahimi }
13344*b7893ccfSSadaf Ebrahimi
13345*b7893ccfSSadaf Ebrahimi if(pBlockVectorDefragCtx)
13346*b7893ccfSSadaf Ebrahimi {
13347*b7893ccfSSadaf Ebrahimi VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13348*b7893ccfSSadaf Ebrahimi &pAllocationsChanged[allocIndex] : VMA_NULL;
13349*b7893ccfSSadaf Ebrahimi pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13350*b7893ccfSSadaf Ebrahimi }
13351*b7893ccfSSadaf Ebrahimi }
13352*b7893ccfSSadaf Ebrahimi }
13353*b7893ccfSSadaf Ebrahimi }
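
// Illustrative sketch, not part of the original source: individual allocations
// reach AddAllocations() above via vmaDefragmentationBegin(). The variables
// allocator, allocs and allocCount are assumed to be provided by the caller:
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocCount;
//   defragInfo.pAllocations = allocs;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//   VmaDefragmentationContext defragCtx;
//   vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   // ... destroy and recreate buffers/images bound to allocations that moved ...
//   vmaDefragmentationEnd(allocator, defragCtx);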
13354*b7893ccfSSadaf Ebrahimi
13355*b7893ccfSSadaf Ebrahimi VkResult VmaDefragmentationContext_T::Defragment(
13356*b7893ccfSSadaf Ebrahimi VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13357*b7893ccfSSadaf Ebrahimi VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13358*b7893ccfSSadaf Ebrahimi VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13359*b7893ccfSSadaf Ebrahimi {
13360*b7893ccfSSadaf Ebrahimi if(pStats)
13361*b7893ccfSSadaf Ebrahimi {
13362*b7893ccfSSadaf Ebrahimi memset(pStats, 0, sizeof(VmaDefragmentationStats));
13363*b7893ccfSSadaf Ebrahimi }
13364*b7893ccfSSadaf Ebrahimi
13365*b7893ccfSSadaf Ebrahimi if(commandBuffer == VK_NULL_HANDLE)
13366*b7893ccfSSadaf Ebrahimi {
13367*b7893ccfSSadaf Ebrahimi maxGpuBytesToMove = 0;
13368*b7893ccfSSadaf Ebrahimi maxGpuAllocationsToMove = 0;
13369*b7893ccfSSadaf Ebrahimi }
13370*b7893ccfSSadaf Ebrahimi
13371*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
13372*b7893ccfSSadaf Ebrahimi
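// Negative VkResult values are errors, so the loops below keep processing pools
// only while res is still a success code (res >= VK_SUCCESS); the first error
// reported by a block vector is captured and returned.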
13373*b7893ccfSSadaf Ebrahimi // Process default pools.
13374*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0;
13375*b7893ccfSSadaf Ebrahimi memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13376*b7893ccfSSadaf Ebrahimi ++memTypeIndex)
13377*b7893ccfSSadaf Ebrahimi {
13378*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13379*b7893ccfSSadaf Ebrahimi if(pBlockVectorCtx)
13380*b7893ccfSSadaf Ebrahimi {
13381*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13382*b7893ccfSSadaf Ebrahimi pBlockVectorCtx->GetBlockVector()->Defragment(
13383*b7893ccfSSadaf Ebrahimi pBlockVectorCtx,
13384*b7893ccfSSadaf Ebrahimi pStats,
13385*b7893ccfSSadaf Ebrahimi maxCpuBytesToMove, maxCpuAllocationsToMove,
13386*b7893ccfSSadaf Ebrahimi maxGpuBytesToMove, maxGpuAllocationsToMove,
13387*b7893ccfSSadaf Ebrahimi commandBuffer);
13388*b7893ccfSSadaf Ebrahimi if(pBlockVectorCtx->res != VK_SUCCESS)
13389*b7893ccfSSadaf Ebrahimi {
13390*b7893ccfSSadaf Ebrahimi res = pBlockVectorCtx->res;
13391*b7893ccfSSadaf Ebrahimi }
13392*b7893ccfSSadaf Ebrahimi }
13393*b7893ccfSSadaf Ebrahimi }
13394*b7893ccfSSadaf Ebrahimi
13395*b7893ccfSSadaf Ebrahimi // Process custom pools.
13396*b7893ccfSSadaf Ebrahimi for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13397*b7893ccfSSadaf Ebrahimi customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13398*b7893ccfSSadaf Ebrahimi ++customCtxIndex)
13399*b7893ccfSSadaf Ebrahimi {
13400*b7893ccfSSadaf Ebrahimi VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13401*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13402*b7893ccfSSadaf Ebrahimi pBlockVectorCtx->GetBlockVector()->Defragment(
13403*b7893ccfSSadaf Ebrahimi pBlockVectorCtx,
13404*b7893ccfSSadaf Ebrahimi pStats,
13405*b7893ccfSSadaf Ebrahimi maxCpuBytesToMove, maxCpuAllocationsToMove,
13406*b7893ccfSSadaf Ebrahimi maxGpuBytesToMove, maxGpuAllocationsToMove,
13407*b7893ccfSSadaf Ebrahimi commandBuffer);
13408*b7893ccfSSadaf Ebrahimi if(pBlockVectorCtx->res != VK_SUCCESS)
13409*b7893ccfSSadaf Ebrahimi {
13410*b7893ccfSSadaf Ebrahimi res = pBlockVectorCtx->res;
13411*b7893ccfSSadaf Ebrahimi }
13412*b7893ccfSSadaf Ebrahimi }
13413*b7893ccfSSadaf Ebrahimi
13414*b7893ccfSSadaf Ebrahimi return res;
13415*b7893ccfSSadaf Ebrahimi }
13416*b7893ccfSSadaf Ebrahimi
13417*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
13418*b7893ccfSSadaf Ebrahimi // VmaRecorder
13419*b7893ccfSSadaf Ebrahimi
13420*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
13421*b7893ccfSSadaf Ebrahimi
13422*b7893ccfSSadaf Ebrahimi VmaRecorder::VmaRecorder() :
13423*b7893ccfSSadaf Ebrahimi m_UseMutex(true),
13424*b7893ccfSSadaf Ebrahimi m_Flags(0),
13425*b7893ccfSSadaf Ebrahimi m_File(VMA_NULL),
13426*b7893ccfSSadaf Ebrahimi m_Freq(INT64_MAX),
13427*b7893ccfSSadaf Ebrahimi m_StartCounter(INT64_MAX)
13428*b7893ccfSSadaf Ebrahimi {
13429*b7893ccfSSadaf Ebrahimi }
13430*b7893ccfSSadaf Ebrahimi
13431*b7893ccfSSadaf Ebrahimi VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13432*b7893ccfSSadaf Ebrahimi {
13433*b7893ccfSSadaf Ebrahimi m_UseMutex = useMutex;
13434*b7893ccfSSadaf Ebrahimi m_Flags = settings.flags;
13435*b7893ccfSSadaf Ebrahimi
13436*b7893ccfSSadaf Ebrahimi QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13437*b7893ccfSSadaf Ebrahimi QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13438*b7893ccfSSadaf Ebrahimi
13439*b7893ccfSSadaf Ebrahimi // Open file for writing.
13440*b7893ccfSSadaf Ebrahimi errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13441*b7893ccfSSadaf Ebrahimi if(err != 0)
13442*b7893ccfSSadaf Ebrahimi {
13443*b7893ccfSSadaf Ebrahimi return VK_ERROR_INITIALIZATION_FAILED;
13444*b7893ccfSSadaf Ebrahimi }
13445*b7893ccfSSadaf Ebrahimi
13446*b7893ccfSSadaf Ebrahimi // Write header.
13447*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13448*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%s\n", "1,5");
13449*b7893ccfSSadaf Ebrahimi
13450*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
13451*b7893ccfSSadaf Ebrahimi }
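
// Note on the recording output, as produced by the Record* functions below: the
// file is plain CSV text. After the two header lines written above ("Vulkan Memory
// Allocator,Calls recording" and "1,5", which appears to denote the format version),
// each recorded call becomes one line of the form
// ThreadId,TimeInSeconds,FrameIndex,FunctionName followed by the call-specific
// parameters.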
13452*b7893ccfSSadaf Ebrahimi
13453*b7893ccfSSadaf Ebrahimi VmaRecorder::~VmaRecorder()
13454*b7893ccfSSadaf Ebrahimi {
13455*b7893ccfSSadaf Ebrahimi if(m_File != VMA_NULL)
13456*b7893ccfSSadaf Ebrahimi {
13457*b7893ccfSSadaf Ebrahimi fclose(m_File);
13458*b7893ccfSSadaf Ebrahimi }
13459*b7893ccfSSadaf Ebrahimi }
13460*b7893ccfSSadaf Ebrahimi
13461*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13462*b7893ccfSSadaf Ebrahimi {
13463*b7893ccfSSadaf Ebrahimi CallParams callParams;
13464*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13465*b7893ccfSSadaf Ebrahimi
13466*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13467*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13468*b7893ccfSSadaf Ebrahimi Flush();
13469*b7893ccfSSadaf Ebrahimi }
13470*b7893ccfSSadaf Ebrahimi
13471*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13472*b7893ccfSSadaf Ebrahimi {
13473*b7893ccfSSadaf Ebrahimi CallParams callParams;
13474*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13475*b7893ccfSSadaf Ebrahimi
13476*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13477*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13478*b7893ccfSSadaf Ebrahimi Flush();
13479*b7893ccfSSadaf Ebrahimi }
13480*b7893ccfSSadaf Ebrahimi
13481*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13482*b7893ccfSSadaf Ebrahimi {
13483*b7893ccfSSadaf Ebrahimi CallParams callParams;
13484*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13485*b7893ccfSSadaf Ebrahimi
13486*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13487*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13488*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeIndex,
13489*b7893ccfSSadaf Ebrahimi createInfo.flags,
13490*b7893ccfSSadaf Ebrahimi createInfo.blockSize,
13491*b7893ccfSSadaf Ebrahimi (uint64_t)createInfo.minBlockCount,
13492*b7893ccfSSadaf Ebrahimi (uint64_t)createInfo.maxBlockCount,
13493*b7893ccfSSadaf Ebrahimi createInfo.frameInUseCount,
13494*b7893ccfSSadaf Ebrahimi pool);
13495*b7893ccfSSadaf Ebrahimi Flush();
13496*b7893ccfSSadaf Ebrahimi }
13497*b7893ccfSSadaf Ebrahimi
13498*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13499*b7893ccfSSadaf Ebrahimi {
13500*b7893ccfSSadaf Ebrahimi CallParams callParams;
13501*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13502*b7893ccfSSadaf Ebrahimi
13503*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13504*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13505*b7893ccfSSadaf Ebrahimi pool);
13506*b7893ccfSSadaf Ebrahimi Flush();
13507*b7893ccfSSadaf Ebrahimi }
13508*b7893ccfSSadaf Ebrahimi
13509*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13510*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
13511*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
13512*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13513*b7893ccfSSadaf Ebrahimi {
13514*b7893ccfSSadaf Ebrahimi CallParams callParams;
13515*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13516*b7893ccfSSadaf Ebrahimi
13517*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13518*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13519*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13520*b7893ccfSSadaf Ebrahimi vkMemReq.size,
13521*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
13522*b7893ccfSSadaf Ebrahimi vkMemReq.memoryTypeBits,
13523*b7893ccfSSadaf Ebrahimi createInfo.flags,
13524*b7893ccfSSadaf Ebrahimi createInfo.usage,
13525*b7893ccfSSadaf Ebrahimi createInfo.requiredFlags,
13526*b7893ccfSSadaf Ebrahimi createInfo.preferredFlags,
13527*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeBits,
13528*b7893ccfSSadaf Ebrahimi createInfo.pool,
13529*b7893ccfSSadaf Ebrahimi allocation,
13530*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13531*b7893ccfSSadaf Ebrahimi Flush();
13532*b7893ccfSSadaf Ebrahimi }
13533*b7893ccfSSadaf Ebrahimi
13534*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13535*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
13536*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
13537*b7893ccfSSadaf Ebrahimi uint64_t allocationCount,
13538*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations)
13539*b7893ccfSSadaf Ebrahimi {
13540*b7893ccfSSadaf Ebrahimi CallParams callParams;
13541*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13542*b7893ccfSSadaf Ebrahimi
13543*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13544*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13545*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13546*b7893ccfSSadaf Ebrahimi vkMemReq.size,
13547*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
13548*b7893ccfSSadaf Ebrahimi vkMemReq.memoryTypeBits,
13549*b7893ccfSSadaf Ebrahimi createInfo.flags,
13550*b7893ccfSSadaf Ebrahimi createInfo.usage,
13551*b7893ccfSSadaf Ebrahimi createInfo.requiredFlags,
13552*b7893ccfSSadaf Ebrahimi createInfo.preferredFlags,
13553*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeBits,
13554*b7893ccfSSadaf Ebrahimi createInfo.pool);
13555*b7893ccfSSadaf Ebrahimi PrintPointerList(allocationCount, pAllocations);
13556*b7893ccfSSadaf Ebrahimi fprintf(m_File, ",%s\n", userDataStr.GetString());
13557*b7893ccfSSadaf Ebrahimi Flush();
13558*b7893ccfSSadaf Ebrahimi }
13559*b7893ccfSSadaf Ebrahimi
13560*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13561*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
13562*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
13563*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
13564*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
13565*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13566*b7893ccfSSadaf Ebrahimi {
13567*b7893ccfSSadaf Ebrahimi CallParams callParams;
13568*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13569*b7893ccfSSadaf Ebrahimi
13570*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13571*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13572*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13573*b7893ccfSSadaf Ebrahimi vkMemReq.size,
13574*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
13575*b7893ccfSSadaf Ebrahimi vkMemReq.memoryTypeBits,
13576*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation ? 1 : 0,
13577*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation ? 1 : 0,
13578*b7893ccfSSadaf Ebrahimi createInfo.flags,
13579*b7893ccfSSadaf Ebrahimi createInfo.usage,
13580*b7893ccfSSadaf Ebrahimi createInfo.requiredFlags,
13581*b7893ccfSSadaf Ebrahimi createInfo.preferredFlags,
13582*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeBits,
13583*b7893ccfSSadaf Ebrahimi createInfo.pool,
13584*b7893ccfSSadaf Ebrahimi allocation,
13585*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13586*b7893ccfSSadaf Ebrahimi Flush();
13587*b7893ccfSSadaf Ebrahimi }
13588*b7893ccfSSadaf Ebrahimi
13589*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13590*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
13591*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
13592*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
13593*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
13594*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13595*b7893ccfSSadaf Ebrahimi {
13596*b7893ccfSSadaf Ebrahimi CallParams callParams;
13597*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13598*b7893ccfSSadaf Ebrahimi
13599*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13600*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13601*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13602*b7893ccfSSadaf Ebrahimi vkMemReq.size,
13603*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
13604*b7893ccfSSadaf Ebrahimi vkMemReq.memoryTypeBits,
13605*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation ? 1 : 0,
13606*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation ? 1 : 0,
13607*b7893ccfSSadaf Ebrahimi createInfo.flags,
13608*b7893ccfSSadaf Ebrahimi createInfo.usage,
13609*b7893ccfSSadaf Ebrahimi createInfo.requiredFlags,
13610*b7893ccfSSadaf Ebrahimi createInfo.preferredFlags,
13611*b7893ccfSSadaf Ebrahimi createInfo.memoryTypeBits,
13612*b7893ccfSSadaf Ebrahimi createInfo.pool,
13613*b7893ccfSSadaf Ebrahimi allocation,
13614*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13615*b7893ccfSSadaf Ebrahimi Flush();
13616*b7893ccfSSadaf Ebrahimi }
13617*b7893ccfSSadaf Ebrahimi
13618*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13619*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13620*b7893ccfSSadaf Ebrahimi {
13621*b7893ccfSSadaf Ebrahimi CallParams callParams;
13622*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13623*b7893ccfSSadaf Ebrahimi
13624*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13625*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13626*b7893ccfSSadaf Ebrahimi allocation);
13627*b7893ccfSSadaf Ebrahimi Flush();
13628*b7893ccfSSadaf Ebrahimi }
13629*b7893ccfSSadaf Ebrahimi
13630*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13631*b7893ccfSSadaf Ebrahimi uint64_t allocationCount,
13632*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations)
13633*b7893ccfSSadaf Ebrahimi {
13634*b7893ccfSSadaf Ebrahimi CallParams callParams;
13635*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13636*b7893ccfSSadaf Ebrahimi
13637*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13638*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13639*b7893ccfSSadaf Ebrahimi PrintPointerList(allocationCount, pAllocations);
13640*b7893ccfSSadaf Ebrahimi fprintf(m_File, "\n");
13641*b7893ccfSSadaf Ebrahimi Flush();
13642*b7893ccfSSadaf Ebrahimi }
13643*b7893ccfSSadaf Ebrahimi
13644*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordResizeAllocation(
13645*b7893ccfSSadaf Ebrahimi uint32_t frameIndex,
13646*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
13647*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize)
13648*b7893ccfSSadaf Ebrahimi {
13649*b7893ccfSSadaf Ebrahimi CallParams callParams;
13650*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13651*b7893ccfSSadaf Ebrahimi
13652*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13653*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13654*b7893ccfSSadaf Ebrahimi allocation, newSize);
13655*b7893ccfSSadaf Ebrahimi Flush();
13656*b7893ccfSSadaf Ebrahimi }
13657*b7893ccfSSadaf Ebrahimi
13658*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13659*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
13660*b7893ccfSSadaf Ebrahimi const void* pUserData)
13661*b7893ccfSSadaf Ebrahimi {
13662*b7893ccfSSadaf Ebrahimi CallParams callParams;
13663*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13664*b7893ccfSSadaf Ebrahimi
13665*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13666*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(
13667*b7893ccfSSadaf Ebrahimi allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13668*b7893ccfSSadaf Ebrahimi pUserData);
13669*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13670*b7893ccfSSadaf Ebrahimi allocation,
13671*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13672*b7893ccfSSadaf Ebrahimi Flush();
13673*b7893ccfSSadaf Ebrahimi }
13674*b7893ccfSSadaf Ebrahimi
13675*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13676*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13677*b7893ccfSSadaf Ebrahimi {
13678*b7893ccfSSadaf Ebrahimi CallParams callParams;
13679*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13680*b7893ccfSSadaf Ebrahimi
13681*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13682*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13683*b7893ccfSSadaf Ebrahimi allocation);
13684*b7893ccfSSadaf Ebrahimi Flush();
13685*b7893ccfSSadaf Ebrahimi }
13686*b7893ccfSSadaf Ebrahimi
13687*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13688*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13689*b7893ccfSSadaf Ebrahimi {
13690*b7893ccfSSadaf Ebrahimi CallParams callParams;
13691*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13692*b7893ccfSSadaf Ebrahimi
13693*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13694*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13695*b7893ccfSSadaf Ebrahimi allocation);
13696*b7893ccfSSadaf Ebrahimi Flush();
13697*b7893ccfSSadaf Ebrahimi }
13698*b7893ccfSSadaf Ebrahimi
13699*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13700*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13701*b7893ccfSSadaf Ebrahimi {
13702*b7893ccfSSadaf Ebrahimi CallParams callParams;
13703*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13704*b7893ccfSSadaf Ebrahimi
13705*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13706*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13707*b7893ccfSSadaf Ebrahimi allocation);
13708*b7893ccfSSadaf Ebrahimi Flush();
13709*b7893ccfSSadaf Ebrahimi }
13710*b7893ccfSSadaf Ebrahimi
13711*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13712*b7893ccfSSadaf Ebrahimi VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13713*b7893ccfSSadaf Ebrahimi {
13714*b7893ccfSSadaf Ebrahimi CallParams callParams;
13715*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13716*b7893ccfSSadaf Ebrahimi
13717*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13718*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13719*b7893ccfSSadaf Ebrahimi allocation,
13720*b7893ccfSSadaf Ebrahimi offset,
13721*b7893ccfSSadaf Ebrahimi size);
13722*b7893ccfSSadaf Ebrahimi Flush();
13723*b7893ccfSSadaf Ebrahimi }
13724*b7893ccfSSadaf Ebrahimi
13725*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13726*b7893ccfSSadaf Ebrahimi VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13727*b7893ccfSSadaf Ebrahimi {
13728*b7893ccfSSadaf Ebrahimi CallParams callParams;
13729*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13730*b7893ccfSSadaf Ebrahimi
13731*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13732*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13733*b7893ccfSSadaf Ebrahimi allocation,
13734*b7893ccfSSadaf Ebrahimi offset,
13735*b7893ccfSSadaf Ebrahimi size);
13736*b7893ccfSSadaf Ebrahimi Flush();
13737*b7893ccfSSadaf Ebrahimi }
13738*b7893ccfSSadaf Ebrahimi
13739*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13740*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo& bufCreateInfo,
13741*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& allocCreateInfo,
13742*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13743*b7893ccfSSadaf Ebrahimi {
13744*b7893ccfSSadaf Ebrahimi CallParams callParams;
13745*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13746*b7893ccfSSadaf Ebrahimi
13747*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13748*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13749*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13750*b7893ccfSSadaf Ebrahimi bufCreateInfo.flags,
13751*b7893ccfSSadaf Ebrahimi bufCreateInfo.size,
13752*b7893ccfSSadaf Ebrahimi bufCreateInfo.usage,
13753*b7893ccfSSadaf Ebrahimi bufCreateInfo.sharingMode,
13754*b7893ccfSSadaf Ebrahimi allocCreateInfo.flags,
13755*b7893ccfSSadaf Ebrahimi allocCreateInfo.usage,
13756*b7893ccfSSadaf Ebrahimi allocCreateInfo.requiredFlags,
13757*b7893ccfSSadaf Ebrahimi allocCreateInfo.preferredFlags,
13758*b7893ccfSSadaf Ebrahimi allocCreateInfo.memoryTypeBits,
13759*b7893ccfSSadaf Ebrahimi allocCreateInfo.pool,
13760*b7893ccfSSadaf Ebrahimi allocation,
13761*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13762*b7893ccfSSadaf Ebrahimi Flush();
13763*b7893ccfSSadaf Ebrahimi }
13764*b7893ccfSSadaf Ebrahimi
13765*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13766*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo& imageCreateInfo,
13767*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& allocCreateInfo,
13768*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13769*b7893ccfSSadaf Ebrahimi {
13770*b7893ccfSSadaf Ebrahimi CallParams callParams;
13771*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13772*b7893ccfSSadaf Ebrahimi
13773*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13774*b7893ccfSSadaf Ebrahimi UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13775*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13776*b7893ccfSSadaf Ebrahimi imageCreateInfo.flags,
13777*b7893ccfSSadaf Ebrahimi imageCreateInfo.imageType,
13778*b7893ccfSSadaf Ebrahimi imageCreateInfo.format,
13779*b7893ccfSSadaf Ebrahimi imageCreateInfo.extent.width,
13780*b7893ccfSSadaf Ebrahimi imageCreateInfo.extent.height,
13781*b7893ccfSSadaf Ebrahimi imageCreateInfo.extent.depth,
13782*b7893ccfSSadaf Ebrahimi imageCreateInfo.mipLevels,
13783*b7893ccfSSadaf Ebrahimi imageCreateInfo.arrayLayers,
13784*b7893ccfSSadaf Ebrahimi imageCreateInfo.samples,
13785*b7893ccfSSadaf Ebrahimi imageCreateInfo.tiling,
13786*b7893ccfSSadaf Ebrahimi imageCreateInfo.usage,
13787*b7893ccfSSadaf Ebrahimi imageCreateInfo.sharingMode,
13788*b7893ccfSSadaf Ebrahimi imageCreateInfo.initialLayout,
13789*b7893ccfSSadaf Ebrahimi allocCreateInfo.flags,
13790*b7893ccfSSadaf Ebrahimi allocCreateInfo.usage,
13791*b7893ccfSSadaf Ebrahimi allocCreateInfo.requiredFlags,
13792*b7893ccfSSadaf Ebrahimi allocCreateInfo.preferredFlags,
13793*b7893ccfSSadaf Ebrahimi allocCreateInfo.memoryTypeBits,
13794*b7893ccfSSadaf Ebrahimi allocCreateInfo.pool,
13795*b7893ccfSSadaf Ebrahimi allocation,
13796*b7893ccfSSadaf Ebrahimi userDataStr.GetString());
13797*b7893ccfSSadaf Ebrahimi Flush();
13798*b7893ccfSSadaf Ebrahimi }
13799*b7893ccfSSadaf Ebrahimi
13800*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13801*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13802*b7893ccfSSadaf Ebrahimi {
13803*b7893ccfSSadaf Ebrahimi CallParams callParams;
13804*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13805*b7893ccfSSadaf Ebrahimi
13806*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13808*b7893ccfSSadaf Ebrahimi allocation);
13809*b7893ccfSSadaf Ebrahimi Flush();
13810*b7893ccfSSadaf Ebrahimi }
13811*b7893ccfSSadaf Ebrahimi
13812*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13813*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13814*b7893ccfSSadaf Ebrahimi {
13815*b7893ccfSSadaf Ebrahimi CallParams callParams;
13816*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13817*b7893ccfSSadaf Ebrahimi
13818*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13819*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13820*b7893ccfSSadaf Ebrahimi allocation);
13821*b7893ccfSSadaf Ebrahimi Flush();
13822*b7893ccfSSadaf Ebrahimi }
13823*b7893ccfSSadaf Ebrahimi
13824*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13825*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13826*b7893ccfSSadaf Ebrahimi {
13827*b7893ccfSSadaf Ebrahimi CallParams callParams;
13828*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13829*b7893ccfSSadaf Ebrahimi
13830*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13831*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13832*b7893ccfSSadaf Ebrahimi allocation);
13833*b7893ccfSSadaf Ebrahimi Flush();
13834*b7893ccfSSadaf Ebrahimi }
13835*b7893ccfSSadaf Ebrahimi
13836*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13837*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
13838*b7893ccfSSadaf Ebrahimi {
13839*b7893ccfSSadaf Ebrahimi CallParams callParams;
13840*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13841*b7893ccfSSadaf Ebrahimi
13842*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13843*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13844*b7893ccfSSadaf Ebrahimi allocation);
13845*b7893ccfSSadaf Ebrahimi Flush();
13846*b7893ccfSSadaf Ebrahimi }
13847*b7893ccfSSadaf Ebrahimi
13848*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13849*b7893ccfSSadaf Ebrahimi VmaPool pool)
13850*b7893ccfSSadaf Ebrahimi {
13851*b7893ccfSSadaf Ebrahimi CallParams callParams;
13852*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13853*b7893ccfSSadaf Ebrahimi
13854*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13855*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13856*b7893ccfSSadaf Ebrahimi pool);
13857*b7893ccfSSadaf Ebrahimi Flush();
13858*b7893ccfSSadaf Ebrahimi }
13859*b7893ccfSSadaf Ebrahimi
13860*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13861*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2& info,
13862*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext ctx)
13863*b7893ccfSSadaf Ebrahimi {
13864*b7893ccfSSadaf Ebrahimi CallParams callParams;
13865*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13866*b7893ccfSSadaf Ebrahimi
13867*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13868*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13869*b7893ccfSSadaf Ebrahimi info.flags);
13870*b7893ccfSSadaf Ebrahimi PrintPointerList(info.allocationCount, info.pAllocations);
13871*b7893ccfSSadaf Ebrahimi fprintf(m_File, ",");
13872*b7893ccfSSadaf Ebrahimi PrintPointerList(info.poolCount, info.pPools);
13873*b7893ccfSSadaf Ebrahimi fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13874*b7893ccfSSadaf Ebrahimi info.maxCpuBytesToMove,
13875*b7893ccfSSadaf Ebrahimi info.maxCpuAllocationsToMove,
13876*b7893ccfSSadaf Ebrahimi info.maxGpuBytesToMove,
13877*b7893ccfSSadaf Ebrahimi info.maxGpuAllocationsToMove,
13878*b7893ccfSSadaf Ebrahimi info.commandBuffer,
13879*b7893ccfSSadaf Ebrahimi ctx);
13880*b7893ccfSSadaf Ebrahimi Flush();
13881*b7893ccfSSadaf Ebrahimi }
13882*b7893ccfSSadaf Ebrahimi
13883*b7893ccfSSadaf Ebrahimi void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13884*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext ctx)
13885*b7893ccfSSadaf Ebrahimi {
13886*b7893ccfSSadaf Ebrahimi CallParams callParams;
13887*b7893ccfSSadaf Ebrahimi GetBasicParams(callParams);
13888*b7893ccfSSadaf Ebrahimi
13889*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_FileMutex, m_UseMutex);
13890*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13891*b7893ccfSSadaf Ebrahimi ctx);
13892*b7893ccfSSadaf Ebrahimi Flush();
13893*b7893ccfSSadaf Ebrahimi }
13894*b7893ccfSSadaf Ebrahimi
13895*b7893ccfSSadaf Ebrahimi VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13896*b7893ccfSSadaf Ebrahimi {
13897*b7893ccfSSadaf Ebrahimi if(pUserData != VMA_NULL)
13898*b7893ccfSSadaf Ebrahimi {
13899*b7893ccfSSadaf Ebrahimi if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13900*b7893ccfSSadaf Ebrahimi {
13901*b7893ccfSSadaf Ebrahimi m_Str = (const char*)pUserData;
13902*b7893ccfSSadaf Ebrahimi }
13903*b7893ccfSSadaf Ebrahimi else
13904*b7893ccfSSadaf Ebrahimi {
13905*b7893ccfSSadaf Ebrahimi sprintf_s(m_PtrStr, "%p", pUserData);
13906*b7893ccfSSadaf Ebrahimi m_Str = m_PtrStr;
13907*b7893ccfSSadaf Ebrahimi }
13908*b7893ccfSSadaf Ebrahimi }
13909*b7893ccfSSadaf Ebrahimi else
13910*b7893ccfSSadaf Ebrahimi {
13911*b7893ccfSSadaf Ebrahimi m_Str = "";
13912*b7893ccfSSadaf Ebrahimi }
13913*b7893ccfSSadaf Ebrahimi }
13914*b7893ccfSSadaf Ebrahimi
13915*b7893ccfSSadaf Ebrahimi void VmaRecorder::WriteConfiguration(
13916*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceProperties& devProps,
13917*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceMemoryProperties& memProps,
13918*b7893ccfSSadaf Ebrahimi bool dedicatedAllocationExtensionEnabled)
13919*b7893ccfSSadaf Ebrahimi {
13920*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Config,Begin\n");
13921*b7893ccfSSadaf Ebrahimi
13922*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13923*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13924*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13925*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13926*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13927*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13928*b7893ccfSSadaf Ebrahimi
13929*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13930*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13931*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13932*b7893ccfSSadaf Ebrahimi
13933*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13934*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13935*b7893ccfSSadaf Ebrahimi {
13936*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13937*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13938*b7893ccfSSadaf Ebrahimi }
13939*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13940*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13941*b7893ccfSSadaf Ebrahimi {
13942*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13943*b7893ccfSSadaf Ebrahimi fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13944*b7893ccfSSadaf Ebrahimi }
13945*b7893ccfSSadaf Ebrahimi
13946*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13947*b7893ccfSSadaf Ebrahimi
13948*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13949*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13950*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13951*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13952*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13953*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13954*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13955*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13956*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13957*b7893ccfSSadaf Ebrahimi
13958*b7893ccfSSadaf Ebrahimi fprintf(m_File, "Config,End\n");
13959*b7893ccfSSadaf Ebrahimi }
13960*b7893ccfSSadaf Ebrahimi
13961*b7893ccfSSadaf Ebrahimi void VmaRecorder::GetBasicParams(CallParams& outParams)
13962*b7893ccfSSadaf Ebrahimi {
13963*b7893ccfSSadaf Ebrahimi outParams.threadId = GetCurrentThreadId();
13964*b7893ccfSSadaf Ebrahimi
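// Time is reported in seconds elapsed since Init(), computed from the difference
// in QueryPerformanceCounter ticks divided by the counter frequency m_Freq.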
13965*b7893ccfSSadaf Ebrahimi LARGE_INTEGER counter;
13966*b7893ccfSSadaf Ebrahimi QueryPerformanceCounter(&counter);
13967*b7893ccfSSadaf Ebrahimi outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13968*b7893ccfSSadaf Ebrahimi }
13969*b7893ccfSSadaf Ebrahimi
13970*b7893ccfSSadaf Ebrahimi void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13971*b7893ccfSSadaf Ebrahimi {
13972*b7893ccfSSadaf Ebrahimi if(count)
13973*b7893ccfSSadaf Ebrahimi {
13974*b7893ccfSSadaf Ebrahimi fprintf(m_File, "%p", pItems[0]);
13975*b7893ccfSSadaf Ebrahimi for(uint64_t i = 1; i < count; ++i)
13976*b7893ccfSSadaf Ebrahimi {
13977*b7893ccfSSadaf Ebrahimi fprintf(m_File, " %p", pItems[i]);
13978*b7893ccfSSadaf Ebrahimi }
13979*b7893ccfSSadaf Ebrahimi }
13980*b7893ccfSSadaf Ebrahimi }
13981*b7893ccfSSadaf Ebrahimi
13982*b7893ccfSSadaf Ebrahimi void VmaRecorder::Flush()
13983*b7893ccfSSadaf Ebrahimi {
13984*b7893ccfSSadaf Ebrahimi if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13985*b7893ccfSSadaf Ebrahimi {
13986*b7893ccfSSadaf Ebrahimi fflush(m_File);
13987*b7893ccfSSadaf Ebrahimi }
13988*b7893ccfSSadaf Ebrahimi }
13989*b7893ccfSSadaf Ebrahimi
13990*b7893ccfSSadaf Ebrahimi #endif // #if VMA_RECORDING_ENABLED
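
// Illustrative sketch, not part of the original source: the application enables
// call recording when creating the allocator, assuming the library was built with
// VMA_RECORDING_ENABLED defined to 1. physicalDevice and device are assumed
// caller-side Vulkan handles:
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Flush() after every call
//   recordSettings.pFilePath = "vma_calls.csv";
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pRecordSettings = &recordSettings;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);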
13991*b7893ccfSSadaf Ebrahimi
13992*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
13993*b7893ccfSSadaf Ebrahimi // VmaAllocator_T
13994*b7893ccfSSadaf Ebrahimi
13995*b7893ccfSSadaf Ebrahimi VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13996*b7893ccfSSadaf Ebrahimi m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13997*b7893ccfSSadaf Ebrahimi m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13998*b7893ccfSSadaf Ebrahimi m_hDevice(pCreateInfo->device),
13999*b7893ccfSSadaf Ebrahimi m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14000*b7893ccfSSadaf Ebrahimi m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14001*b7893ccfSSadaf Ebrahimi *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14002*b7893ccfSSadaf Ebrahimi m_PreferredLargeHeapBlockSize(0),
14003*b7893ccfSSadaf Ebrahimi m_PhysicalDevice(pCreateInfo->physicalDevice),
14004*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex(0),
14005*b7893ccfSSadaf Ebrahimi m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14006*b7893ccfSSadaf Ebrahimi m_NextPoolId(0)
14007*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
14008*b7893ccfSSadaf Ebrahimi ,m_pRecorder(VMA_NULL)
14009*b7893ccfSSadaf Ebrahimi #endif
14010*b7893ccfSSadaf Ebrahimi {
14011*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_DETECT_CORRUPTION)
14012*b7893ccfSSadaf Ebrahimi {
14013*b7893ccfSSadaf Ebrahimi // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14014*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14015*b7893ccfSSadaf Ebrahimi }
14016*b7893ccfSSadaf Ebrahimi
14017*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14018*b7893ccfSSadaf Ebrahimi
14019*b7893ccfSSadaf Ebrahimi #if !(VMA_DEDICATED_ALLOCATION)
14020*b7893ccfSSadaf Ebrahimi if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14021*b7893ccfSSadaf Ebrahimi {
14022*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14023*b7893ccfSSadaf Ebrahimi }
14024*b7893ccfSSadaf Ebrahimi #endif
14025*b7893ccfSSadaf Ebrahimi
14026*b7893ccfSSadaf Ebrahimi memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14027*b7893ccfSSadaf Ebrahimi memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14028*b7893ccfSSadaf Ebrahimi memset(&m_MemProps, 0, sizeof(m_MemProps));
14029*b7893ccfSSadaf Ebrahimi
14030*b7893ccfSSadaf Ebrahimi memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14031*b7893ccfSSadaf Ebrahimi memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14032*b7893ccfSSadaf Ebrahimi
14033*b7893ccfSSadaf Ebrahimi for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14034*b7893ccfSSadaf Ebrahimi {
14035*b7893ccfSSadaf Ebrahimi m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14036*b7893ccfSSadaf Ebrahimi }
14037*b7893ccfSSadaf Ebrahimi
14038*b7893ccfSSadaf Ebrahimi if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14039*b7893ccfSSadaf Ebrahimi {
14040*b7893ccfSSadaf Ebrahimi m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14041*b7893ccfSSadaf Ebrahimi m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14042*b7893ccfSSadaf Ebrahimi }
14043*b7893ccfSSadaf Ebrahimi
14044*b7893ccfSSadaf Ebrahimi ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14045*b7893ccfSSadaf Ebrahimi
14046*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14047*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14048*b7893ccfSSadaf Ebrahimi
14049*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14050*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14051*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14052*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14053*b7893ccfSSadaf Ebrahimi
14054*b7893ccfSSadaf Ebrahimi m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14055*b7893ccfSSadaf Ebrahimi pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14056*b7893ccfSSadaf Ebrahimi
14057*b7893ccfSSadaf Ebrahimi if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14058*b7893ccfSSadaf Ebrahimi {
14059*b7893ccfSSadaf Ebrahimi for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14060*b7893ccfSSadaf Ebrahimi {
14061*b7893ccfSSadaf Ebrahimi const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14062*b7893ccfSSadaf Ebrahimi if(limit != VK_WHOLE_SIZE)
14063*b7893ccfSSadaf Ebrahimi {
14064*b7893ccfSSadaf Ebrahimi m_HeapSizeLimit[heapIndex] = limit;
14065*b7893ccfSSadaf Ebrahimi if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14066*b7893ccfSSadaf Ebrahimi {
14067*b7893ccfSSadaf Ebrahimi m_MemProps.memoryHeaps[heapIndex].size = limit;
14068*b7893ccfSSadaf Ebrahimi }
14069*b7893ccfSSadaf Ebrahimi }
14070*b7893ccfSSadaf Ebrahimi }
14071*b7893ccfSSadaf Ebrahimi }
14072*b7893ccfSSadaf Ebrahimi
14073*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14074*b7893ccfSSadaf Ebrahimi {
14075*b7893ccfSSadaf Ebrahimi const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14076*b7893ccfSSadaf Ebrahimi
14077*b7893ccfSSadaf Ebrahimi m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14078*b7893ccfSSadaf Ebrahimi this,
14079*b7893ccfSSadaf Ebrahimi memTypeIndex,
14080*b7893ccfSSadaf Ebrahimi preferredBlockSize,
14081*b7893ccfSSadaf Ebrahimi 0,
14082*b7893ccfSSadaf Ebrahimi SIZE_MAX,
14083*b7893ccfSSadaf Ebrahimi GetBufferImageGranularity(),
14084*b7893ccfSSadaf Ebrahimi pCreateInfo->frameInUseCount,
14085*b7893ccfSSadaf Ebrahimi false, // isCustomPool
14086*b7893ccfSSadaf Ebrahimi false, // explicitBlockSize
14087*b7893ccfSSadaf Ebrahimi false); // linearAlgorithm
14088*b7893ccfSSadaf Ebrahimi         // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14089*b7893ccfSSadaf Ebrahimi         // because minBlockCount is 0.
14090*b7893ccfSSadaf Ebrahimi m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14091*b7893ccfSSadaf Ebrahimi
14092*b7893ccfSSadaf Ebrahimi }
14093*b7893ccfSSadaf Ebrahimi }
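// Note: applications do not construct VmaAllocator_T directly; the public entry point is
// vmaCreateAllocator(). An illustrative sketch only (variable names are hypothetical, and the
// VkPhysicalDevice / VkDevice are assumed to have been created by the application already):
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//
//     VmaAllocator allocator;
//     vmaCreateAllocator(&allocatorInfo, &allocator);
//
// In this modified version of the header, vmaCreateAllocator() runs the constructor above and
// then the separate Init() below.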
14094*b7893ccfSSadaf Ebrahimi
14095*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14096*b7893ccfSSadaf Ebrahimi {
14097*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
14098*b7893ccfSSadaf Ebrahimi
14099*b7893ccfSSadaf Ebrahimi if(pCreateInfo->pRecordSettings != VMA_NULL &&
14100*b7893ccfSSadaf Ebrahimi !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14101*b7893ccfSSadaf Ebrahimi {
14102*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
14103*b7893ccfSSadaf Ebrahimi m_pRecorder = vma_new(this, VmaRecorder)();
14104*b7893ccfSSadaf Ebrahimi res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14105*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
14106*b7893ccfSSadaf Ebrahimi {
14107*b7893ccfSSadaf Ebrahimi return res;
14108*b7893ccfSSadaf Ebrahimi }
14109*b7893ccfSSadaf Ebrahimi m_pRecorder->WriteConfiguration(
14110*b7893ccfSSadaf Ebrahimi m_PhysicalDeviceProperties,
14111*b7893ccfSSadaf Ebrahimi m_MemProps,
14112*b7893ccfSSadaf Ebrahimi m_UseKhrDedicatedAllocation);
14113*b7893ccfSSadaf Ebrahimi m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14114*b7893ccfSSadaf Ebrahimi #else
14115*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14116*b7893ccfSSadaf Ebrahimi return VK_ERROR_FEATURE_NOT_PRESENT;
14117*b7893ccfSSadaf Ebrahimi #endif
14118*b7893ccfSSadaf Ebrahimi }
14119*b7893ccfSSadaf Ebrahimi
14120*b7893ccfSSadaf Ebrahimi return res;
14121*b7893ccfSSadaf Ebrahimi }
14122*b7893ccfSSadaf Ebrahimi
14123*b7893ccfSSadaf Ebrahimi VmaAllocator_T::~VmaAllocator_T()
14124*b7893ccfSSadaf Ebrahimi {
14125*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
14126*b7893ccfSSadaf Ebrahimi if(m_pRecorder != VMA_NULL)
14127*b7893ccfSSadaf Ebrahimi {
14128*b7893ccfSSadaf Ebrahimi m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14129*b7893ccfSSadaf Ebrahimi vma_delete(this, m_pRecorder);
14130*b7893ccfSSadaf Ebrahimi }
14131*b7893ccfSSadaf Ebrahimi #endif
14132*b7893ccfSSadaf Ebrahimi
14133*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_Pools.empty());
14134*b7893ccfSSadaf Ebrahimi
14135*b7893ccfSSadaf Ebrahimi for(size_t i = GetMemoryTypeCount(); i--; )
14136*b7893ccfSSadaf Ebrahimi {
14137*b7893ccfSSadaf Ebrahimi vma_delete(this, m_pDedicatedAllocations[i]);
14138*b7893ccfSSadaf Ebrahimi vma_delete(this, m_pBlockVectors[i]);
14139*b7893ccfSSadaf Ebrahimi }
14140*b7893ccfSSadaf Ebrahimi }
14141*b7893ccfSSadaf Ebrahimi
14142*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14143*b7893ccfSSadaf Ebrahimi {
14144*b7893ccfSSadaf Ebrahimi #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14145*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14146*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14147*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14148*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14149*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14150*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14151*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14152*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14153*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14154*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14155*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14156*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14157*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14158*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14159*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14160*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14161*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14162*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14163*b7893ccfSSadaf Ebrahimi if(m_UseKhrDedicatedAllocation)
14164*b7893ccfSSadaf Ebrahimi {
14165*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14166*b7893ccfSSadaf Ebrahimi (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14167*b7893ccfSSadaf Ebrahimi m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14168*b7893ccfSSadaf Ebrahimi (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14169*b7893ccfSSadaf Ebrahimi }
14170*b7893ccfSSadaf Ebrahimi #endif // #if VMA_DEDICATED_ALLOCATION
14171*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14172*b7893ccfSSadaf Ebrahimi
14173*b7893ccfSSadaf Ebrahimi #define VMA_COPY_IF_NOT_NULL(funcName) \
14174*b7893ccfSSadaf Ebrahimi if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14175*b7893ccfSSadaf Ebrahimi
14176*b7893ccfSSadaf Ebrahimi if(pVulkanFunctions != VMA_NULL)
14177*b7893ccfSSadaf Ebrahimi {
14178*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14179*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14180*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14181*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14182*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkMapMemory);
14183*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14184*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14185*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14186*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14187*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14188*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14189*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14190*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14191*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14192*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkCreateImage);
14193*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14194*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14195*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14196*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14197*b7893ccfSSadaf Ebrahimi VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14198*b7893ccfSSadaf Ebrahimi #endif
14199*b7893ccfSSadaf Ebrahimi }
14200*b7893ccfSSadaf Ebrahimi
14201*b7893ccfSSadaf Ebrahimi #undef VMA_COPY_IF_NOT_NULL
14202*b7893ccfSSadaf Ebrahimi
14203*b7893ccfSSadaf Ebrahimi // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14204*b7893ccfSSadaf Ebrahimi // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14205*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14206*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14207*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14208*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14209*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14210*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14211*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14212*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14213*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14214*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14215*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14216*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14217*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14218*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14219*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14220*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14221*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14222*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14223*b7893ccfSSadaf Ebrahimi if(m_UseKhrDedicatedAllocation)
14224*b7893ccfSSadaf Ebrahimi {
14225*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14226*b7893ccfSSadaf Ebrahimi VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14227*b7893ccfSSadaf Ebrahimi }
14228*b7893ccfSSadaf Ebrahimi #endif
14229*b7893ccfSSadaf Ebrahimi }
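// When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, none of the statically linked entry points
// above are used, so the asserts at the end of ImportVulkanFunctions() require the application
// to supply every pointer through VmaAllocatorCreateInfo::pVulkanFunctions. A minimal sketch
// (assuming the function pointers are available as global symbols or were fetched with
// vkGetInstanceProcAddr / vkGetDeviceProcAddr; "allocatorCreateInfo" is a hypothetical local):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//     vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
//     vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//     vulkanFunctions.vkFreeMemory = vkFreeMemory;
//     // ...and so on for the remaining members checked by the asserts above...
//     allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;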
14230*b7893ccfSSadaf Ebrahimi
14231*b7893ccfSSadaf Ebrahimi VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14232*b7893ccfSSadaf Ebrahimi {
14233*b7893ccfSSadaf Ebrahimi const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14234*b7893ccfSSadaf Ebrahimi const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14235*b7893ccfSSadaf Ebrahimi const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14236*b7893ccfSSadaf Ebrahimi return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14237*b7893ccfSSadaf Ebrahimi }
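// Worked example, assuming the default macro values in this header (VMA_SMALL_HEAP_MAX_SIZE =
// 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB) and no preferredLargeHeapBlockSize
// override: a 256 MiB heap counts as small, so blocks created in it default to
// 256 MiB / 8 = 32 MiB, while an 8 GiB heap gets the full 256 MiB preferred block size.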
14238*b7893ccfSSadaf Ebrahimi
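// Summary of the strategy implemented below for a single memory type:
// 1. Prefer a dedicated VkDeviceMemory allocation when the caller asked for one, when
//    VMA_DEBUG_ALWAYS_DEDICATED_MEMORY is enabled, or when the requested size exceeds half of
//    the preferred block size (unless a custom pool or NEVER_ALLOCATE forbids it).
// 2. Otherwise sub-allocate from the default VmaBlockVector of this memory type.
// 3. If sub-allocation fails and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not set, fall back
//    to a dedicated allocation before reporting failure.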
14239*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::AllocateMemoryOfType(
14240*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
14241*b7893ccfSSadaf Ebrahimi VkDeviceSize alignment,
14242*b7893ccfSSadaf Ebrahimi bool dedicatedAllocation,
14243*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
14244*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
14245*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
14246*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
14247*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
14248*b7893ccfSSadaf Ebrahimi size_t allocationCount,
14249*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations)
14250*b7893ccfSSadaf Ebrahimi {
14251*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocations != VMA_NULL);
14252*b7893ccfSSadaf Ebrahimi     VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14253*b7893ccfSSadaf Ebrahimi
14254*b7893ccfSSadaf Ebrahimi VmaAllocationCreateInfo finalCreateInfo = createInfo;
14255*b7893ccfSSadaf Ebrahimi
14256*b7893ccfSSadaf Ebrahimi // If memory type is not HOST_VISIBLE, disable MAPPED.
14257*b7893ccfSSadaf Ebrahimi if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14258*b7893ccfSSadaf Ebrahimi (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14259*b7893ccfSSadaf Ebrahimi {
14260*b7893ccfSSadaf Ebrahimi finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14261*b7893ccfSSadaf Ebrahimi }
14262*b7893ccfSSadaf Ebrahimi
14263*b7893ccfSSadaf Ebrahimi VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14264*b7893ccfSSadaf Ebrahimi VMA_ASSERT(blockVector);
14265*b7893ccfSSadaf Ebrahimi
14266*b7893ccfSSadaf Ebrahimi const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14267*b7893ccfSSadaf Ebrahimi bool preferDedicatedMemory =
14268*b7893ccfSSadaf Ebrahimi VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14269*b7893ccfSSadaf Ebrahimi dedicatedAllocation ||
14270*b7893ccfSSadaf Ebrahimi         // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14271*b7893ccfSSadaf Ebrahimi size > preferredBlockSize / 2;
14272*b7893ccfSSadaf Ebrahimi
14273*b7893ccfSSadaf Ebrahimi if(preferDedicatedMemory &&
14274*b7893ccfSSadaf Ebrahimi (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14275*b7893ccfSSadaf Ebrahimi finalCreateInfo.pool == VK_NULL_HANDLE)
14276*b7893ccfSSadaf Ebrahimi {
14277*b7893ccfSSadaf Ebrahimi finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14278*b7893ccfSSadaf Ebrahimi }
14279*b7893ccfSSadaf Ebrahimi
14280*b7893ccfSSadaf Ebrahimi if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14281*b7893ccfSSadaf Ebrahimi {
14282*b7893ccfSSadaf Ebrahimi if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14283*b7893ccfSSadaf Ebrahimi {
14284*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14285*b7893ccfSSadaf Ebrahimi }
14286*b7893ccfSSadaf Ebrahimi else
14287*b7893ccfSSadaf Ebrahimi {
14288*b7893ccfSSadaf Ebrahimi return AllocateDedicatedMemory(
14289*b7893ccfSSadaf Ebrahimi size,
14290*b7893ccfSSadaf Ebrahimi suballocType,
14291*b7893ccfSSadaf Ebrahimi memTypeIndex,
14292*b7893ccfSSadaf Ebrahimi (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14293*b7893ccfSSadaf Ebrahimi (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14294*b7893ccfSSadaf Ebrahimi finalCreateInfo.pUserData,
14295*b7893ccfSSadaf Ebrahimi dedicatedBuffer,
14296*b7893ccfSSadaf Ebrahimi dedicatedImage,
14297*b7893ccfSSadaf Ebrahimi allocationCount,
14298*b7893ccfSSadaf Ebrahimi pAllocations);
14299*b7893ccfSSadaf Ebrahimi }
14300*b7893ccfSSadaf Ebrahimi }
14301*b7893ccfSSadaf Ebrahimi else
14302*b7893ccfSSadaf Ebrahimi {
14303*b7893ccfSSadaf Ebrahimi VkResult res = blockVector->Allocate(
14304*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // hCurrentPool
14305*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex.load(),
14306*b7893ccfSSadaf Ebrahimi size,
14307*b7893ccfSSadaf Ebrahimi alignment,
14308*b7893ccfSSadaf Ebrahimi finalCreateInfo,
14309*b7893ccfSSadaf Ebrahimi suballocType,
14310*b7893ccfSSadaf Ebrahimi allocationCount,
14311*b7893ccfSSadaf Ebrahimi pAllocations);
14312*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14313*b7893ccfSSadaf Ebrahimi {
14314*b7893ccfSSadaf Ebrahimi return res;
14315*b7893ccfSSadaf Ebrahimi }
14316*b7893ccfSSadaf Ebrahimi
14317*b7893ccfSSadaf Ebrahimi         // Allocation from the block vector failed. Try dedicated memory.
14318*b7893ccfSSadaf Ebrahimi if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14319*b7893ccfSSadaf Ebrahimi {
14320*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14321*b7893ccfSSadaf Ebrahimi }
14322*b7893ccfSSadaf Ebrahimi else
14323*b7893ccfSSadaf Ebrahimi {
14324*b7893ccfSSadaf Ebrahimi res = AllocateDedicatedMemory(
14325*b7893ccfSSadaf Ebrahimi size,
14326*b7893ccfSSadaf Ebrahimi suballocType,
14327*b7893ccfSSadaf Ebrahimi memTypeIndex,
14328*b7893ccfSSadaf Ebrahimi (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14329*b7893ccfSSadaf Ebrahimi (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14330*b7893ccfSSadaf Ebrahimi finalCreateInfo.pUserData,
14331*b7893ccfSSadaf Ebrahimi dedicatedBuffer,
14332*b7893ccfSSadaf Ebrahimi dedicatedImage,
14333*b7893ccfSSadaf Ebrahimi allocationCount,
14334*b7893ccfSSadaf Ebrahimi pAllocations);
14335*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14336*b7893ccfSSadaf Ebrahimi {
14337*b7893ccfSSadaf Ebrahimi                 // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14338*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14339*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
14340*b7893ccfSSadaf Ebrahimi }
14341*b7893ccfSSadaf Ebrahimi else
14342*b7893ccfSSadaf Ebrahimi {
14343*b7893ccfSSadaf Ebrahimi // Everything failed: Return error code.
14344*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14345*b7893ccfSSadaf Ebrahimi return res;
14346*b7893ccfSSadaf Ebrahimi }
14347*b7893ccfSSadaf Ebrahimi }
14348*b7893ccfSSadaf Ebrahimi }
14349*b7893ccfSSadaf Ebrahimi }
14350*b7893ccfSSadaf Ebrahimi
14351*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::AllocateDedicatedMemory(
14352*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
14353*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
14354*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
14355*b7893ccfSSadaf Ebrahimi bool map,
14356*b7893ccfSSadaf Ebrahimi bool isUserDataString,
14357*b7893ccfSSadaf Ebrahimi void* pUserData,
14358*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
14359*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
14360*b7893ccfSSadaf Ebrahimi size_t allocationCount,
14361*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations)
14362*b7893ccfSSadaf Ebrahimi {
14363*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocationCount > 0 && pAllocations);
14364*b7893ccfSSadaf Ebrahimi
14365*b7893ccfSSadaf Ebrahimi VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14366*b7893ccfSSadaf Ebrahimi allocInfo.memoryTypeIndex = memTypeIndex;
14367*b7893ccfSSadaf Ebrahimi allocInfo.allocationSize = size;
14368*b7893ccfSSadaf Ebrahimi
14369*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14370*b7893ccfSSadaf Ebrahimi VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14371*b7893ccfSSadaf Ebrahimi if(m_UseKhrDedicatedAllocation)
14372*b7893ccfSSadaf Ebrahimi {
14373*b7893ccfSSadaf Ebrahimi if(dedicatedBuffer != VK_NULL_HANDLE)
14374*b7893ccfSSadaf Ebrahimi {
14375*b7893ccfSSadaf Ebrahimi VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14376*b7893ccfSSadaf Ebrahimi dedicatedAllocInfo.buffer = dedicatedBuffer;
14377*b7893ccfSSadaf Ebrahimi allocInfo.pNext = &dedicatedAllocInfo;
14378*b7893ccfSSadaf Ebrahimi }
14379*b7893ccfSSadaf Ebrahimi else if(dedicatedImage != VK_NULL_HANDLE)
14380*b7893ccfSSadaf Ebrahimi {
14381*b7893ccfSSadaf Ebrahimi dedicatedAllocInfo.image = dedicatedImage;
14382*b7893ccfSSadaf Ebrahimi allocInfo.pNext = &dedicatedAllocInfo;
14383*b7893ccfSSadaf Ebrahimi }
14384*b7893ccfSSadaf Ebrahimi }
14385*b7893ccfSSadaf Ebrahimi #endif // #if VMA_DEDICATED_ALLOCATION
14386*b7893ccfSSadaf Ebrahimi
14387*b7893ccfSSadaf Ebrahimi size_t allocIndex;
14388*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
14389*b7893ccfSSadaf Ebrahimi for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14390*b7893ccfSSadaf Ebrahimi {
14391*b7893ccfSSadaf Ebrahimi res = AllocateDedicatedMemoryPage(
14392*b7893ccfSSadaf Ebrahimi size,
14393*b7893ccfSSadaf Ebrahimi suballocType,
14394*b7893ccfSSadaf Ebrahimi memTypeIndex,
14395*b7893ccfSSadaf Ebrahimi allocInfo,
14396*b7893ccfSSadaf Ebrahimi map,
14397*b7893ccfSSadaf Ebrahimi isUserDataString,
14398*b7893ccfSSadaf Ebrahimi pUserData,
14399*b7893ccfSSadaf Ebrahimi pAllocations + allocIndex);
14400*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
14401*b7893ccfSSadaf Ebrahimi {
14402*b7893ccfSSadaf Ebrahimi break;
14403*b7893ccfSSadaf Ebrahimi }
14404*b7893ccfSSadaf Ebrahimi }
14405*b7893ccfSSadaf Ebrahimi
14406*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14407*b7893ccfSSadaf Ebrahimi {
14408*b7893ccfSSadaf Ebrahimi // Register them in m_pDedicatedAllocations.
14409*b7893ccfSSadaf Ebrahimi {
14410*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14411*b7893ccfSSadaf Ebrahimi AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14412*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDedicatedAllocations);
14413*b7893ccfSSadaf Ebrahimi for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14414*b7893ccfSSadaf Ebrahimi {
14415*b7893ccfSSadaf Ebrahimi VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14416*b7893ccfSSadaf Ebrahimi }
14417*b7893ccfSSadaf Ebrahimi }
14418*b7893ccfSSadaf Ebrahimi
14419*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14420*b7893ccfSSadaf Ebrahimi }
14421*b7893ccfSSadaf Ebrahimi else
14422*b7893ccfSSadaf Ebrahimi {
14423*b7893ccfSSadaf Ebrahimi // Free all already created allocations.
14424*b7893ccfSSadaf Ebrahimi while(allocIndex--)
14425*b7893ccfSSadaf Ebrahimi {
14426*b7893ccfSSadaf Ebrahimi VmaAllocation currAlloc = pAllocations[allocIndex];
14427*b7893ccfSSadaf Ebrahimi VkDeviceMemory hMemory = currAlloc->GetMemory();
14428*b7893ccfSSadaf Ebrahimi
14429*b7893ccfSSadaf Ebrahimi /*
14430*b7893ccfSSadaf Ebrahimi             There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14431*b7893ccfSSadaf Ebrahimi             before vkFreeMemory.
14432*b7893ccfSSadaf Ebrahimi
14433*b7893ccfSSadaf Ebrahimi if(currAlloc->GetMappedData() != VMA_NULL)
14434*b7893ccfSSadaf Ebrahimi {
14435*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14436*b7893ccfSSadaf Ebrahimi }
14437*b7893ccfSSadaf Ebrahimi */
14438*b7893ccfSSadaf Ebrahimi
14439*b7893ccfSSadaf Ebrahimi FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14440*b7893ccfSSadaf Ebrahimi
14441*b7893ccfSSadaf Ebrahimi currAlloc->SetUserData(this, VMA_NULL);
14442*b7893ccfSSadaf Ebrahimi vma_delete(this, currAlloc);
14443*b7893ccfSSadaf Ebrahimi }
14444*b7893ccfSSadaf Ebrahimi
14445*b7893ccfSSadaf Ebrahimi memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14446*b7893ccfSSadaf Ebrahimi }
14447*b7893ccfSSadaf Ebrahimi
14448*b7893ccfSSadaf Ebrahimi return res;
14449*b7893ccfSSadaf Ebrahimi }
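// Note: the loop above is all-or-nothing. If any page of a multi-allocation request fails,
// every page already allocated by this call is freed again (vkUnmapMemory is intentionally
// skipped, as noted above) and the output array is zeroed before the error code is returned.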
14450*b7893ccfSSadaf Ebrahimi
14451*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14452*b7893ccfSSadaf Ebrahimi VkDeviceSize size,
14453*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
14454*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex,
14455*b7893ccfSSadaf Ebrahimi const VkMemoryAllocateInfo& allocInfo,
14456*b7893ccfSSadaf Ebrahimi bool map,
14457*b7893ccfSSadaf Ebrahimi bool isUserDataString,
14458*b7893ccfSSadaf Ebrahimi void* pUserData,
14459*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation)
14460*b7893ccfSSadaf Ebrahimi {
14461*b7893ccfSSadaf Ebrahimi VkDeviceMemory hMemory = VK_NULL_HANDLE;
14462*b7893ccfSSadaf Ebrahimi VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14463*b7893ccfSSadaf Ebrahimi if(res < 0)
14464*b7893ccfSSadaf Ebrahimi {
14465*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14466*b7893ccfSSadaf Ebrahimi return res;
14467*b7893ccfSSadaf Ebrahimi }
14468*b7893ccfSSadaf Ebrahimi
14469*b7893ccfSSadaf Ebrahimi void* pMappedData = VMA_NULL;
14470*b7893ccfSSadaf Ebrahimi if(map)
14471*b7893ccfSSadaf Ebrahimi {
14472*b7893ccfSSadaf Ebrahimi res = (*m_VulkanFunctions.vkMapMemory)(
14473*b7893ccfSSadaf Ebrahimi m_hDevice,
14474*b7893ccfSSadaf Ebrahimi hMemory,
14475*b7893ccfSSadaf Ebrahimi 0,
14476*b7893ccfSSadaf Ebrahimi VK_WHOLE_SIZE,
14477*b7893ccfSSadaf Ebrahimi 0,
14478*b7893ccfSSadaf Ebrahimi &pMappedData);
14479*b7893ccfSSadaf Ebrahimi if(res < 0)
14480*b7893ccfSSadaf Ebrahimi {
14481*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" vkMapMemory FAILED");
14482*b7893ccfSSadaf Ebrahimi FreeVulkanMemory(memTypeIndex, size, hMemory);
14483*b7893ccfSSadaf Ebrahimi return res;
14484*b7893ccfSSadaf Ebrahimi }
14485*b7893ccfSSadaf Ebrahimi }
14486*b7893ccfSSadaf Ebrahimi
14487*b7893ccfSSadaf Ebrahimi *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14488*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14489*b7893ccfSSadaf Ebrahimi (*pAllocation)->SetUserData(this, pUserData);
14490*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14491*b7893ccfSSadaf Ebrahimi {
14492*b7893ccfSSadaf Ebrahimi FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14493*b7893ccfSSadaf Ebrahimi }
14494*b7893ccfSSadaf Ebrahimi
14495*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
14496*b7893ccfSSadaf Ebrahimi }
14497*b7893ccfSSadaf Ebrahimi
14498*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::GetBufferMemoryRequirements(
14499*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer,
14500*b7893ccfSSadaf Ebrahimi VkMemoryRequirements& memReq,
14501*b7893ccfSSadaf Ebrahimi bool& requiresDedicatedAllocation,
14502*b7893ccfSSadaf Ebrahimi bool& prefersDedicatedAllocation) const
14503*b7893ccfSSadaf Ebrahimi {
14504*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14505*b7893ccfSSadaf Ebrahimi if(m_UseKhrDedicatedAllocation)
14506*b7893ccfSSadaf Ebrahimi {
14507*b7893ccfSSadaf Ebrahimi VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14508*b7893ccfSSadaf Ebrahimi memReqInfo.buffer = hBuffer;
14509*b7893ccfSSadaf Ebrahimi
14510*b7893ccfSSadaf Ebrahimi VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14511*b7893ccfSSadaf Ebrahimi
14512*b7893ccfSSadaf Ebrahimi VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14513*b7893ccfSSadaf Ebrahimi memReq2.pNext = &memDedicatedReq;
14514*b7893ccfSSadaf Ebrahimi
14515*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14516*b7893ccfSSadaf Ebrahimi
14517*b7893ccfSSadaf Ebrahimi memReq = memReq2.memoryRequirements;
14518*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14519*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14520*b7893ccfSSadaf Ebrahimi }
14521*b7893ccfSSadaf Ebrahimi else
14522*b7893ccfSSadaf Ebrahimi #endif // #if VMA_DEDICATED_ALLOCATION
14523*b7893ccfSSadaf Ebrahimi {
14524*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14525*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation = false;
14526*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation = false;
14527*b7893ccfSSadaf Ebrahimi }
14528*b7893ccfSSadaf Ebrahimi }
14529*b7893ccfSSadaf Ebrahimi
14530*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::GetImageMemoryRequirements(
14531*b7893ccfSSadaf Ebrahimi VkImage hImage,
14532*b7893ccfSSadaf Ebrahimi VkMemoryRequirements& memReq,
14533*b7893ccfSSadaf Ebrahimi bool& requiresDedicatedAllocation,
14534*b7893ccfSSadaf Ebrahimi bool& prefersDedicatedAllocation) const
14535*b7893ccfSSadaf Ebrahimi {
14536*b7893ccfSSadaf Ebrahimi #if VMA_DEDICATED_ALLOCATION
14537*b7893ccfSSadaf Ebrahimi if(m_UseKhrDedicatedAllocation)
14538*b7893ccfSSadaf Ebrahimi {
14539*b7893ccfSSadaf Ebrahimi VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14540*b7893ccfSSadaf Ebrahimi memReqInfo.image = hImage;
14541*b7893ccfSSadaf Ebrahimi
14542*b7893ccfSSadaf Ebrahimi VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14543*b7893ccfSSadaf Ebrahimi
14544*b7893ccfSSadaf Ebrahimi VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14545*b7893ccfSSadaf Ebrahimi memReq2.pNext = &memDedicatedReq;
14546*b7893ccfSSadaf Ebrahimi
14547*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14548*b7893ccfSSadaf Ebrahimi
14549*b7893ccfSSadaf Ebrahimi memReq = memReq2.memoryRequirements;
14550*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14551*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14552*b7893ccfSSadaf Ebrahimi }
14553*b7893ccfSSadaf Ebrahimi else
14554*b7893ccfSSadaf Ebrahimi #endif // #if VMA_DEDICATED_ALLOCATION
14555*b7893ccfSSadaf Ebrahimi {
14556*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14557*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation = false;
14558*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation = false;
14559*b7893ccfSSadaf Ebrahimi }
14560*b7893ccfSSadaf Ebrahimi }
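// Both Get*MemoryRequirements helpers above report requiresDedicatedAllocation /
// prefersDedicatedAllocation only when VK_KHR_dedicated_allocation support is enabled; the
// flags are consumed by AllocateMemory() below, where either one makes AllocateMemoryOfType()
// prefer a dedicated VkDeviceMemory block over sub-allocation.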
14561*b7893ccfSSadaf Ebrahimi
14562*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::AllocateMemory(
14563*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements& vkMemReq,
14564*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation,
14565*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation,
14566*b7893ccfSSadaf Ebrahimi VkBuffer dedicatedBuffer,
14567*b7893ccfSSadaf Ebrahimi VkImage dedicatedImage,
14568*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo& createInfo,
14569*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType,
14570*b7893ccfSSadaf Ebrahimi size_t allocationCount,
14571*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations)
14572*b7893ccfSSadaf Ebrahimi {
14573*b7893ccfSSadaf Ebrahimi memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14574*b7893ccfSSadaf Ebrahimi
14575*b7893ccfSSadaf Ebrahimi VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14576*b7893ccfSSadaf Ebrahimi
14577*b7893ccfSSadaf Ebrahimi if(vkMemReq.size == 0)
14578*b7893ccfSSadaf Ebrahimi {
14579*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
14580*b7893ccfSSadaf Ebrahimi }
14581*b7893ccfSSadaf Ebrahimi if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14582*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14583*b7893ccfSSadaf Ebrahimi {
14584*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14585*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14586*b7893ccfSSadaf Ebrahimi }
14587*b7893ccfSSadaf Ebrahimi if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14588*b7893ccfSSadaf Ebrahimi (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14589*b7893ccfSSadaf Ebrahimi {
14590*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14591*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14592*b7893ccfSSadaf Ebrahimi }
14593*b7893ccfSSadaf Ebrahimi if(requiresDedicatedAllocation)
14594*b7893ccfSSadaf Ebrahimi {
14595*b7893ccfSSadaf Ebrahimi if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14596*b7893ccfSSadaf Ebrahimi {
14597*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14598*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14599*b7893ccfSSadaf Ebrahimi }
14600*b7893ccfSSadaf Ebrahimi if(createInfo.pool != VK_NULL_HANDLE)
14601*b7893ccfSSadaf Ebrahimi {
14602*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14603*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14604*b7893ccfSSadaf Ebrahimi }
14605*b7893ccfSSadaf Ebrahimi }
14606*b7893ccfSSadaf Ebrahimi if((createInfo.pool != VK_NULL_HANDLE) &&
14607*b7893ccfSSadaf Ebrahimi ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14608*b7893ccfSSadaf Ebrahimi {
14609*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14610*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14611*b7893ccfSSadaf Ebrahimi }
14612*b7893ccfSSadaf Ebrahimi
14613*b7893ccfSSadaf Ebrahimi if(createInfo.pool != VK_NULL_HANDLE)
14614*b7893ccfSSadaf Ebrahimi {
14615*b7893ccfSSadaf Ebrahimi const VkDeviceSize alignmentForPool = VMA_MAX(
14616*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
14617*b7893ccfSSadaf Ebrahimi GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14618*b7893ccfSSadaf Ebrahimi return createInfo.pool->m_BlockVector.Allocate(
14619*b7893ccfSSadaf Ebrahimi createInfo.pool,
14620*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex.load(),
14621*b7893ccfSSadaf Ebrahimi vkMemReq.size,
14622*b7893ccfSSadaf Ebrahimi alignmentForPool,
14623*b7893ccfSSadaf Ebrahimi createInfo,
14624*b7893ccfSSadaf Ebrahimi suballocType,
14625*b7893ccfSSadaf Ebrahimi allocationCount,
14626*b7893ccfSSadaf Ebrahimi pAllocations);
14627*b7893ccfSSadaf Ebrahimi }
14628*b7893ccfSSadaf Ebrahimi else
14629*b7893ccfSSadaf Ebrahimi {
14630*b7893ccfSSadaf Ebrahimi         // Bit mask of Vulkan memory types acceptable for this allocation.
14631*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14632*b7893ccfSSadaf Ebrahimi uint32_t memTypeIndex = UINT32_MAX;
14633*b7893ccfSSadaf Ebrahimi VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14634*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14635*b7893ccfSSadaf Ebrahimi {
14636*b7893ccfSSadaf Ebrahimi VkDeviceSize alignmentForMemType = VMA_MAX(
14637*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
14638*b7893ccfSSadaf Ebrahimi GetMemoryTypeMinAlignment(memTypeIndex));
14639*b7893ccfSSadaf Ebrahimi
14640*b7893ccfSSadaf Ebrahimi res = AllocateMemoryOfType(
14641*b7893ccfSSadaf Ebrahimi vkMemReq.size,
14642*b7893ccfSSadaf Ebrahimi alignmentForMemType,
14643*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation || prefersDedicatedAllocation,
14644*b7893ccfSSadaf Ebrahimi dedicatedBuffer,
14645*b7893ccfSSadaf Ebrahimi dedicatedImage,
14646*b7893ccfSSadaf Ebrahimi createInfo,
14647*b7893ccfSSadaf Ebrahimi memTypeIndex,
14648*b7893ccfSSadaf Ebrahimi suballocType,
14649*b7893ccfSSadaf Ebrahimi allocationCount,
14650*b7893ccfSSadaf Ebrahimi pAllocations);
14651*b7893ccfSSadaf Ebrahimi // Succeeded on first try.
14652*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14653*b7893ccfSSadaf Ebrahimi {
14654*b7893ccfSSadaf Ebrahimi return res;
14655*b7893ccfSSadaf Ebrahimi }
14656*b7893ccfSSadaf Ebrahimi // Allocation from this memory type failed. Try other compatible memory types.
14657*b7893ccfSSadaf Ebrahimi else
14658*b7893ccfSSadaf Ebrahimi {
14659*b7893ccfSSadaf Ebrahimi for(;;)
14660*b7893ccfSSadaf Ebrahimi {
14661*b7893ccfSSadaf Ebrahimi // Remove old memTypeIndex from list of possibilities.
14662*b7893ccfSSadaf Ebrahimi memoryTypeBits &= ~(1u << memTypeIndex);
14663*b7893ccfSSadaf Ebrahimi // Find alternative memTypeIndex.
14664*b7893ccfSSadaf Ebrahimi res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14665*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14666*b7893ccfSSadaf Ebrahimi {
14667*b7893ccfSSadaf Ebrahimi alignmentForMemType = VMA_MAX(
14668*b7893ccfSSadaf Ebrahimi vkMemReq.alignment,
14669*b7893ccfSSadaf Ebrahimi GetMemoryTypeMinAlignment(memTypeIndex));
14670*b7893ccfSSadaf Ebrahimi
14671*b7893ccfSSadaf Ebrahimi res = AllocateMemoryOfType(
14672*b7893ccfSSadaf Ebrahimi vkMemReq.size,
14673*b7893ccfSSadaf Ebrahimi alignmentForMemType,
14674*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation || prefersDedicatedAllocation,
14675*b7893ccfSSadaf Ebrahimi dedicatedBuffer,
14676*b7893ccfSSadaf Ebrahimi dedicatedImage,
14677*b7893ccfSSadaf Ebrahimi createInfo,
14678*b7893ccfSSadaf Ebrahimi memTypeIndex,
14679*b7893ccfSSadaf Ebrahimi suballocType,
14680*b7893ccfSSadaf Ebrahimi allocationCount,
14681*b7893ccfSSadaf Ebrahimi pAllocations);
14682*b7893ccfSSadaf Ebrahimi // Allocation from this alternative memory type succeeded.
14683*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
14684*b7893ccfSSadaf Ebrahimi {
14685*b7893ccfSSadaf Ebrahimi return res;
14686*b7893ccfSSadaf Ebrahimi }
14687*b7893ccfSSadaf Ebrahimi // else: Allocation from this memory type failed. Try next one - next loop iteration.
14688*b7893ccfSSadaf Ebrahimi }
14689*b7893ccfSSadaf Ebrahimi // No other matching memory type index could be found.
14690*b7893ccfSSadaf Ebrahimi else
14691*b7893ccfSSadaf Ebrahimi {
14692*b7893ccfSSadaf Ebrahimi // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14693*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14694*b7893ccfSSadaf Ebrahimi }
14695*b7893ccfSSadaf Ebrahimi }
14696*b7893ccfSSadaf Ebrahimi }
14697*b7893ccfSSadaf Ebrahimi }
14698*b7893ccfSSadaf Ebrahimi         // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14699*b7893ccfSSadaf Ebrahimi else
14700*b7893ccfSSadaf Ebrahimi return res;
14701*b7893ccfSSadaf Ebrahimi }
14702*b7893ccfSSadaf Ebrahimi }
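// AllocateMemory() is normally reached through the public API, e.g. vmaAllocateMemory,
// vmaAllocateMemoryForBuffer/Image or vmaCreateBuffer/Image. An illustrative sketch of the
// most common path (buffer creation; the local variable names are hypothetical):
//
//     VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufferInfo.size = 65536;
//     bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buffer;
//     VmaAllocation allocation;
//     vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);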
14703*b7893ccfSSadaf Ebrahimi
14704*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::FreeMemory(
14705*b7893ccfSSadaf Ebrahimi size_t allocationCount,
14706*b7893ccfSSadaf Ebrahimi const VmaAllocation* pAllocations)
14707*b7893ccfSSadaf Ebrahimi {
14708*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocations);
14709*b7893ccfSSadaf Ebrahimi
14710*b7893ccfSSadaf Ebrahimi for(size_t allocIndex = allocationCount; allocIndex--; )
14711*b7893ccfSSadaf Ebrahimi {
14712*b7893ccfSSadaf Ebrahimi VmaAllocation allocation = pAllocations[allocIndex];
14713*b7893ccfSSadaf Ebrahimi
14714*b7893ccfSSadaf Ebrahimi if(allocation != VK_NULL_HANDLE)
14715*b7893ccfSSadaf Ebrahimi {
14716*b7893ccfSSadaf Ebrahimi if(TouchAllocation(allocation))
14717*b7893ccfSSadaf Ebrahimi {
14718*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14719*b7893ccfSSadaf Ebrahimi {
14720*b7893ccfSSadaf Ebrahimi FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14721*b7893ccfSSadaf Ebrahimi }
14722*b7893ccfSSadaf Ebrahimi
14723*b7893ccfSSadaf Ebrahimi switch(allocation->GetType())
14724*b7893ccfSSadaf Ebrahimi {
14725*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14726*b7893ccfSSadaf Ebrahimi {
14727*b7893ccfSSadaf Ebrahimi VmaBlockVector* pBlockVector = VMA_NULL;
14728*b7893ccfSSadaf Ebrahimi VmaPool hPool = allocation->GetPool();
14729*b7893ccfSSadaf Ebrahimi if(hPool != VK_NULL_HANDLE)
14730*b7893ccfSSadaf Ebrahimi {
14731*b7893ccfSSadaf Ebrahimi pBlockVector = &hPool->m_BlockVector;
14732*b7893ccfSSadaf Ebrahimi }
14733*b7893ccfSSadaf Ebrahimi else
14734*b7893ccfSSadaf Ebrahimi {
14735*b7893ccfSSadaf Ebrahimi const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14736*b7893ccfSSadaf Ebrahimi pBlockVector = m_pBlockVectors[memTypeIndex];
14737*b7893ccfSSadaf Ebrahimi }
14738*b7893ccfSSadaf Ebrahimi pBlockVector->Free(allocation);
14739*b7893ccfSSadaf Ebrahimi }
14740*b7893ccfSSadaf Ebrahimi break;
14741*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14742*b7893ccfSSadaf Ebrahimi FreeDedicatedMemory(allocation);
14743*b7893ccfSSadaf Ebrahimi break;
14744*b7893ccfSSadaf Ebrahimi default:
14745*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
14746*b7893ccfSSadaf Ebrahimi }
14747*b7893ccfSSadaf Ebrahimi }
14748*b7893ccfSSadaf Ebrahimi
14749*b7893ccfSSadaf Ebrahimi allocation->SetUserData(this, VMA_NULL);
14750*b7893ccfSSadaf Ebrahimi vma_delete(this, allocation);
14751*b7893ccfSSadaf Ebrahimi }
14752*b7893ccfSSadaf Ebrahimi }
14753*b7893ccfSSadaf Ebrahimi }
14754*b7893ccfSSadaf Ebrahimi
14755*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::ResizeAllocation(
14756*b7893ccfSSadaf Ebrahimi const VmaAllocation alloc,
14757*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize)
14758*b7893ccfSSadaf Ebrahimi {
14759*b7893ccfSSadaf Ebrahimi if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14760*b7893ccfSSadaf Ebrahimi {
14761*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
14762*b7893ccfSSadaf Ebrahimi }
14763*b7893ccfSSadaf Ebrahimi if(newSize == alloc->GetSize())
14764*b7893ccfSSadaf Ebrahimi {
14765*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
14766*b7893ccfSSadaf Ebrahimi }
14767*b7893ccfSSadaf Ebrahimi
14768*b7893ccfSSadaf Ebrahimi switch(alloc->GetType())
14769*b7893ccfSSadaf Ebrahimi {
14770*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14771*b7893ccfSSadaf Ebrahimi return VK_ERROR_FEATURE_NOT_PRESENT;
14772*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14773*b7893ccfSSadaf Ebrahimi if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14774*b7893ccfSSadaf Ebrahimi {
14775*b7893ccfSSadaf Ebrahimi alloc->ChangeSize(newSize);
14776*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14777*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
14778*b7893ccfSSadaf Ebrahimi }
14779*b7893ccfSSadaf Ebrahimi else
14780*b7893ccfSSadaf Ebrahimi {
14781*b7893ccfSSadaf Ebrahimi return VK_ERROR_OUT_OF_POOL_MEMORY;
14782*b7893ccfSSadaf Ebrahimi }
14783*b7893ccfSSadaf Ebrahimi default:
14784*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
14785*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
14786*b7893ccfSSadaf Ebrahimi }
14787*b7893ccfSSadaf Ebrahimi }
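// Note: resizing is only supported for block (sub-)allocations, and only when the block
// metadata can grow or shrink the suballocation in place; dedicated allocations always return
// VK_ERROR_FEATURE_NOT_PRESENT.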
14788*b7893ccfSSadaf Ebrahimi
14789*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14790*b7893ccfSSadaf Ebrahimi {
14791*b7893ccfSSadaf Ebrahimi // Initialize.
14792*b7893ccfSSadaf Ebrahimi InitStatInfo(pStats->total);
14793*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14794*b7893ccfSSadaf Ebrahimi InitStatInfo(pStats->memoryType[i]);
14795*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14796*b7893ccfSSadaf Ebrahimi InitStatInfo(pStats->memoryHeap[i]);
14797*b7893ccfSSadaf Ebrahimi
14798*b7893ccfSSadaf Ebrahimi // Process default pools.
14799*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14800*b7893ccfSSadaf Ebrahimi {
14801*b7893ccfSSadaf Ebrahimi VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14802*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlockVector);
14803*b7893ccfSSadaf Ebrahimi pBlockVector->AddStats(pStats);
14804*b7893ccfSSadaf Ebrahimi }
14805*b7893ccfSSadaf Ebrahimi
14806*b7893ccfSSadaf Ebrahimi // Process custom pools.
14807*b7893ccfSSadaf Ebrahimi {
14808*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14809*b7893ccfSSadaf Ebrahimi for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14810*b7893ccfSSadaf Ebrahimi {
14811*b7893ccfSSadaf Ebrahimi m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14812*b7893ccfSSadaf Ebrahimi }
14813*b7893ccfSSadaf Ebrahimi }
14814*b7893ccfSSadaf Ebrahimi
14815*b7893ccfSSadaf Ebrahimi // Process dedicated allocations.
14816*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14817*b7893ccfSSadaf Ebrahimi {
14818*b7893ccfSSadaf Ebrahimi const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14819*b7893ccfSSadaf Ebrahimi VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14820*b7893ccfSSadaf Ebrahimi AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14821*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDedicatedAllocVector);
14822*b7893ccfSSadaf Ebrahimi for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14823*b7893ccfSSadaf Ebrahimi {
14824*b7893ccfSSadaf Ebrahimi VmaStatInfo allocationStatInfo;
14825*b7893ccfSSadaf Ebrahimi (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14826*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->total, allocationStatInfo);
14827*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14828*b7893ccfSSadaf Ebrahimi VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14829*b7893ccfSSadaf Ebrahimi }
14830*b7893ccfSSadaf Ebrahimi }
14831*b7893ccfSSadaf Ebrahimi
14832*b7893ccfSSadaf Ebrahimi // Postprocess.
14833*b7893ccfSSadaf Ebrahimi VmaPostprocessCalcStatInfo(pStats->total);
14834*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14835*b7893ccfSSadaf Ebrahimi VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14836*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14837*b7893ccfSSadaf Ebrahimi VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14838*b7893ccfSSadaf Ebrahimi }
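// Illustrative use through the public API (a sketch; the allocator handle is assumed to exist):
//
//     VmaStats stats;
//     vmaCalculateStats(allocator, &stats);
//     // stats.total, stats.memoryType[i] and stats.memoryHeap[i] now hold the aggregated
//     // VmaStatInfo values computed by the function above.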
14839*b7893ccfSSadaf Ebrahimi
14840*b7893ccfSSadaf Ebrahimi static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14841*b7893ccfSSadaf Ebrahimi
14842*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::DefragmentationBegin(
14843*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2& info,
14844*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
14845*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext* pContext)
14846*b7893ccfSSadaf Ebrahimi {
14847*b7893ccfSSadaf Ebrahimi if(info.pAllocationsChanged != VMA_NULL)
14848*b7893ccfSSadaf Ebrahimi {
14849*b7893ccfSSadaf Ebrahimi memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14850*b7893ccfSSadaf Ebrahimi }
14851*b7893ccfSSadaf Ebrahimi
14852*b7893ccfSSadaf Ebrahimi *pContext = vma_new(this, VmaDefragmentationContext_T)(
14853*b7893ccfSSadaf Ebrahimi this, m_CurrentFrameIndex.load(), info.flags, pStats);
14854*b7893ccfSSadaf Ebrahimi
14855*b7893ccfSSadaf Ebrahimi (*pContext)->AddPools(info.poolCount, info.pPools);
14856*b7893ccfSSadaf Ebrahimi (*pContext)->AddAllocations(
14857*b7893ccfSSadaf Ebrahimi info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14858*b7893ccfSSadaf Ebrahimi
14859*b7893ccfSSadaf Ebrahimi VkResult res = (*pContext)->Defragment(
14860*b7893ccfSSadaf Ebrahimi info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14861*b7893ccfSSadaf Ebrahimi info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14862*b7893ccfSSadaf Ebrahimi info.commandBuffer, pStats);
14863*b7893ccfSSadaf Ebrahimi
14864*b7893ccfSSadaf Ebrahimi if(res != VK_NOT_READY)
14865*b7893ccfSSadaf Ebrahimi {
14866*b7893ccfSSadaf Ebrahimi vma_delete(this, *pContext);
14867*b7893ccfSSadaf Ebrahimi *pContext = VMA_NULL;
14868*b7893ccfSSadaf Ebrahimi }
14869*b7893ccfSSadaf Ebrahimi
14870*b7893ccfSSadaf Ebrahimi return res;
14871*b7893ccfSSadaf Ebrahimi }
14872*b7893ccfSSadaf Ebrahimi
14873*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::DefragmentationEnd(
14874*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext context)
14875*b7893ccfSSadaf Ebrahimi {
14876*b7893ccfSSadaf Ebrahimi vma_delete(this, context);
14877*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
14878*b7893ccfSSadaf Ebrahimi }
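// Typical flow: the public vmaDefragmentationBegin() lands in DefragmentationBegin() above.
// If it returns VK_NOT_READY, GPU copy commands were recorded into
// VmaDefragmentationInfo2::commandBuffer and are expected to be submitted and completed by the
// application before vmaDefragmentationEnd() (DefragmentationEnd() above) releases the context;
// any other result already destroys the context inside DefragmentationBegin().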
14879*b7893ccfSSadaf Ebrahimi
14880*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14881*b7893ccfSSadaf Ebrahimi {
14882*b7893ccfSSadaf Ebrahimi if(hAllocation->CanBecomeLost())
14883*b7893ccfSSadaf Ebrahimi {
14884*b7893ccfSSadaf Ebrahimi /*
14885*b7893ccfSSadaf Ebrahimi Warning: This is a carefully designed algorithm.
14886*b7893ccfSSadaf Ebrahimi Do not modify unless you really know what you're doing :)
14887*b7893ccfSSadaf Ebrahimi */
14888*b7893ccfSSadaf Ebrahimi const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14889*b7893ccfSSadaf Ebrahimi uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14890*b7893ccfSSadaf Ebrahimi for(;;)
14891*b7893ccfSSadaf Ebrahimi {
14892*b7893ccfSSadaf Ebrahimi if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14893*b7893ccfSSadaf Ebrahimi {
14894*b7893ccfSSadaf Ebrahimi pAllocationInfo->memoryType = UINT32_MAX;
14895*b7893ccfSSadaf Ebrahimi pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14896*b7893ccfSSadaf Ebrahimi pAllocationInfo->offset = 0;
14897*b7893ccfSSadaf Ebrahimi pAllocationInfo->size = hAllocation->GetSize();
14898*b7893ccfSSadaf Ebrahimi pAllocationInfo->pMappedData = VMA_NULL;
14899*b7893ccfSSadaf Ebrahimi pAllocationInfo->pUserData = hAllocation->GetUserData();
14900*b7893ccfSSadaf Ebrahimi return;
14901*b7893ccfSSadaf Ebrahimi }
14902*b7893ccfSSadaf Ebrahimi else if(localLastUseFrameIndex == localCurrFrameIndex)
14903*b7893ccfSSadaf Ebrahimi {
14904*b7893ccfSSadaf Ebrahimi pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14905*b7893ccfSSadaf Ebrahimi pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14906*b7893ccfSSadaf Ebrahimi pAllocationInfo->offset = hAllocation->GetOffset();
14907*b7893ccfSSadaf Ebrahimi pAllocationInfo->size = hAllocation->GetSize();
14908*b7893ccfSSadaf Ebrahimi pAllocationInfo->pMappedData = VMA_NULL;
14909*b7893ccfSSadaf Ebrahimi pAllocationInfo->pUserData = hAllocation->GetUserData();
14910*b7893ccfSSadaf Ebrahimi return;
14911*b7893ccfSSadaf Ebrahimi }
14912*b7893ccfSSadaf Ebrahimi else // Last use time earlier than current time.
14913*b7893ccfSSadaf Ebrahimi {
14914*b7893ccfSSadaf Ebrahimi if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14915*b7893ccfSSadaf Ebrahimi {
14916*b7893ccfSSadaf Ebrahimi localLastUseFrameIndex = localCurrFrameIndex;
14917*b7893ccfSSadaf Ebrahimi }
14918*b7893ccfSSadaf Ebrahimi }
14919*b7893ccfSSadaf Ebrahimi }
14920*b7893ccfSSadaf Ebrahimi }
14921*b7893ccfSSadaf Ebrahimi else
14922*b7893ccfSSadaf Ebrahimi {
14923*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
14924*b7893ccfSSadaf Ebrahimi uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14925*b7893ccfSSadaf Ebrahimi uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14926*b7893ccfSSadaf Ebrahimi for(;;)
14927*b7893ccfSSadaf Ebrahimi {
14928*b7893ccfSSadaf Ebrahimi VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14929*b7893ccfSSadaf Ebrahimi if(localLastUseFrameIndex == localCurrFrameIndex)
14930*b7893ccfSSadaf Ebrahimi {
14931*b7893ccfSSadaf Ebrahimi break;
14932*b7893ccfSSadaf Ebrahimi }
14933*b7893ccfSSadaf Ebrahimi else // Last use time earlier than current time.
14934*b7893ccfSSadaf Ebrahimi {
14935*b7893ccfSSadaf Ebrahimi if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14936*b7893ccfSSadaf Ebrahimi {
14937*b7893ccfSSadaf Ebrahimi localLastUseFrameIndex = localCurrFrameIndex;
14938*b7893ccfSSadaf Ebrahimi }
14939*b7893ccfSSadaf Ebrahimi }
14940*b7893ccfSSadaf Ebrahimi }
14941*b7893ccfSSadaf Ebrahimi #endif
14942*b7893ccfSSadaf Ebrahimi
14943*b7893ccfSSadaf Ebrahimi pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14944*b7893ccfSSadaf Ebrahimi pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14945*b7893ccfSSadaf Ebrahimi pAllocationInfo->offset = hAllocation->GetOffset();
14946*b7893ccfSSadaf Ebrahimi pAllocationInfo->size = hAllocation->GetSize();
14947*b7893ccfSSadaf Ebrahimi pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14948*b7893ccfSSadaf Ebrahimi pAllocationInfo->pUserData = hAllocation->GetUserData();
14949*b7893ccfSSadaf Ebrahimi }
14950*b7893ccfSSadaf Ebrahimi }
14951*b7893ccfSSadaf Ebrahimi
14952*b7893ccfSSadaf Ebrahimi bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14953*b7893ccfSSadaf Ebrahimi {
14954*b7893ccfSSadaf Ebrahimi // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14955*b7893ccfSSadaf Ebrahimi if(hAllocation->CanBecomeLost())
14956*b7893ccfSSadaf Ebrahimi {
14957*b7893ccfSSadaf Ebrahimi uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14958*b7893ccfSSadaf Ebrahimi uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14959*b7893ccfSSadaf Ebrahimi for(;;)
14960*b7893ccfSSadaf Ebrahimi {
14961*b7893ccfSSadaf Ebrahimi if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14962*b7893ccfSSadaf Ebrahimi {
14963*b7893ccfSSadaf Ebrahimi return false;
14964*b7893ccfSSadaf Ebrahimi }
14965*b7893ccfSSadaf Ebrahimi else if(localLastUseFrameIndex == localCurrFrameIndex)
14966*b7893ccfSSadaf Ebrahimi {
14967*b7893ccfSSadaf Ebrahimi return true;
14968*b7893ccfSSadaf Ebrahimi }
14969*b7893ccfSSadaf Ebrahimi else // Last use time earlier than current time.
14970*b7893ccfSSadaf Ebrahimi {
14971*b7893ccfSSadaf Ebrahimi if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14972*b7893ccfSSadaf Ebrahimi {
14973*b7893ccfSSadaf Ebrahimi localLastUseFrameIndex = localCurrFrameIndex;
14974*b7893ccfSSadaf Ebrahimi }
14975*b7893ccfSSadaf Ebrahimi }
14976*b7893ccfSSadaf Ebrahimi }
14977*b7893ccfSSadaf Ebrahimi }
14978*b7893ccfSSadaf Ebrahimi else
14979*b7893ccfSSadaf Ebrahimi {
14980*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
14981*b7893ccfSSadaf Ebrahimi uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14982*b7893ccfSSadaf Ebrahimi uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14983*b7893ccfSSadaf Ebrahimi for(;;)
14984*b7893ccfSSadaf Ebrahimi {
14985*b7893ccfSSadaf Ebrahimi VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14986*b7893ccfSSadaf Ebrahimi if(localLastUseFrameIndex == localCurrFrameIndex)
14987*b7893ccfSSadaf Ebrahimi {
14988*b7893ccfSSadaf Ebrahimi break;
14989*b7893ccfSSadaf Ebrahimi }
14990*b7893ccfSSadaf Ebrahimi else // Last use time earlier than current time.
14991*b7893ccfSSadaf Ebrahimi {
14992*b7893ccfSSadaf Ebrahimi if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14993*b7893ccfSSadaf Ebrahimi {
14994*b7893ccfSSadaf Ebrahimi localLastUseFrameIndex = localCurrFrameIndex;
14995*b7893ccfSSadaf Ebrahimi }
14996*b7893ccfSSadaf Ebrahimi }
14997*b7893ccfSSadaf Ebrahimi }
14998*b7893ccfSSadaf Ebrahimi #endif
14999*b7893ccfSSadaf Ebrahimi
15000*b7893ccfSSadaf Ebrahimi return true;
15001*b7893ccfSSadaf Ebrahimi }
15002*b7893ccfSSadaf Ebrahimi }
15003*b7893ccfSSadaf Ebrahimi
15004*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15005*b7893ccfSSadaf Ebrahimi {
15006*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15007*b7893ccfSSadaf Ebrahimi
15008*b7893ccfSSadaf Ebrahimi VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15009*b7893ccfSSadaf Ebrahimi
15010*b7893ccfSSadaf Ebrahimi if(newCreateInfo.maxBlockCount == 0)
15011*b7893ccfSSadaf Ebrahimi {
15012*b7893ccfSSadaf Ebrahimi newCreateInfo.maxBlockCount = SIZE_MAX;
15013*b7893ccfSSadaf Ebrahimi }
15014*b7893ccfSSadaf Ebrahimi if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15015*b7893ccfSSadaf Ebrahimi {
15016*b7893ccfSSadaf Ebrahimi return VK_ERROR_INITIALIZATION_FAILED;
15017*b7893ccfSSadaf Ebrahimi }
15018*b7893ccfSSadaf Ebrahimi
15019*b7893ccfSSadaf Ebrahimi const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15020*b7893ccfSSadaf Ebrahimi
15021*b7893ccfSSadaf Ebrahimi *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15022*b7893ccfSSadaf Ebrahimi
15023*b7893ccfSSadaf Ebrahimi VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15024*b7893ccfSSadaf Ebrahimi if(res != VK_SUCCESS)
15025*b7893ccfSSadaf Ebrahimi {
15026*b7893ccfSSadaf Ebrahimi vma_delete(this, *pPool);
15027*b7893ccfSSadaf Ebrahimi *pPool = VMA_NULL;
15028*b7893ccfSSadaf Ebrahimi return res;
15029*b7893ccfSSadaf Ebrahimi }
15030*b7893ccfSSadaf Ebrahimi
15031*b7893ccfSSadaf Ebrahimi // Add to m_Pools.
15032*b7893ccfSSadaf Ebrahimi {
15033*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15034*b7893ccfSSadaf Ebrahimi (*pPool)->SetId(m_NextPoolId++);
15035*b7893ccfSSadaf Ebrahimi VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15036*b7893ccfSSadaf Ebrahimi }
15037*b7893ccfSSadaf Ebrahimi
15038*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
15039*b7893ccfSSadaf Ebrahimi }
15040*b7893ccfSSadaf Ebrahimi
15041*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::DestroyPool(VmaPool pool)
15042*b7893ccfSSadaf Ebrahimi {
15043*b7893ccfSSadaf Ebrahimi // Remove from m_Pools.
15044*b7893ccfSSadaf Ebrahimi {
15045*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15046*b7893ccfSSadaf Ebrahimi bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15047*b7893ccfSSadaf Ebrahimi VMA_ASSERT(success && "Pool not found in Allocator.");
15048*b7893ccfSSadaf Ebrahimi }
15049*b7893ccfSSadaf Ebrahimi
15050*b7893ccfSSadaf Ebrahimi vma_delete(this, pool);
15051*b7893ccfSSadaf Ebrahimi }
15052*b7893ccfSSadaf Ebrahimi
15053*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15054*b7893ccfSSadaf Ebrahimi {
15055*b7893ccfSSadaf Ebrahimi pool->m_BlockVector.GetPoolStats(pPoolStats);
15056*b7893ccfSSadaf Ebrahimi }
15057*b7893ccfSSadaf Ebrahimi
15058*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15059*b7893ccfSSadaf Ebrahimi {
15060*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex.store(frameIndex);
15061*b7893ccfSSadaf Ebrahimi }
15062*b7893ccfSSadaf Ebrahimi
15063*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::MakePoolAllocationsLost(
15064*b7893ccfSSadaf Ebrahimi VmaPool hPool,
15065*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount)
15066*b7893ccfSSadaf Ebrahimi {
15067*b7893ccfSSadaf Ebrahimi hPool->m_BlockVector.MakePoolAllocationsLost(
15068*b7893ccfSSadaf Ebrahimi m_CurrentFrameIndex.load(),
15069*b7893ccfSSadaf Ebrahimi pLostAllocationCount);
15070*b7893ccfSSadaf Ebrahimi }
15071*b7893ccfSSadaf Ebrahimi
15072*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15073*b7893ccfSSadaf Ebrahimi {
15074*b7893ccfSSadaf Ebrahimi return hPool->m_BlockVector.CheckCorruption();
15075*b7893ccfSSadaf Ebrahimi }
15076*b7893ccfSSadaf Ebrahimi
15077*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15078*b7893ccfSSadaf Ebrahimi {
15079*b7893ccfSSadaf Ebrahimi VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15080*b7893ccfSSadaf Ebrahimi
15081*b7893ccfSSadaf Ebrahimi // Process default pools.
15082*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15083*b7893ccfSSadaf Ebrahimi {
15084*b7893ccfSSadaf Ebrahimi if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15085*b7893ccfSSadaf Ebrahimi {
15086*b7893ccfSSadaf Ebrahimi VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15087*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlockVector);
15088*b7893ccfSSadaf Ebrahimi VkResult localRes = pBlockVector->CheckCorruption();
15089*b7893ccfSSadaf Ebrahimi switch(localRes)
15090*b7893ccfSSadaf Ebrahimi {
15091*b7893ccfSSadaf Ebrahimi case VK_ERROR_FEATURE_NOT_PRESENT:
15092*b7893ccfSSadaf Ebrahimi break;
15093*b7893ccfSSadaf Ebrahimi case VK_SUCCESS:
15094*b7893ccfSSadaf Ebrahimi finalRes = VK_SUCCESS;
15095*b7893ccfSSadaf Ebrahimi break;
15096*b7893ccfSSadaf Ebrahimi default:
15097*b7893ccfSSadaf Ebrahimi return localRes;
15098*b7893ccfSSadaf Ebrahimi }
15099*b7893ccfSSadaf Ebrahimi }
15100*b7893ccfSSadaf Ebrahimi }
15101*b7893ccfSSadaf Ebrahimi
15102*b7893ccfSSadaf Ebrahimi // Process custom pools.
15103*b7893ccfSSadaf Ebrahimi {
15104*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15105*b7893ccfSSadaf Ebrahimi for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15106*b7893ccfSSadaf Ebrahimi {
15107*b7893ccfSSadaf Ebrahimi if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15108*b7893ccfSSadaf Ebrahimi {
15109*b7893ccfSSadaf Ebrahimi VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15110*b7893ccfSSadaf Ebrahimi switch(localRes)
15111*b7893ccfSSadaf Ebrahimi {
15112*b7893ccfSSadaf Ebrahimi case VK_ERROR_FEATURE_NOT_PRESENT:
15113*b7893ccfSSadaf Ebrahimi break;
15114*b7893ccfSSadaf Ebrahimi case VK_SUCCESS:
15115*b7893ccfSSadaf Ebrahimi finalRes = VK_SUCCESS;
15116*b7893ccfSSadaf Ebrahimi break;
15117*b7893ccfSSadaf Ebrahimi default:
15118*b7893ccfSSadaf Ebrahimi return localRes;
15119*b7893ccfSSadaf Ebrahimi }
15120*b7893ccfSSadaf Ebrahimi }
15121*b7893ccfSSadaf Ebrahimi }
15122*b7893ccfSSadaf Ebrahimi }
15123*b7893ccfSSadaf Ebrahimi
15124*b7893ccfSSadaf Ebrahimi return finalRes;
15125*b7893ccfSSadaf Ebrahimi }
15126*b7893ccfSSadaf Ebrahimi
15127*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15128*b7893ccfSSadaf Ebrahimi {
15129*b7893ccfSSadaf Ebrahimi *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15130*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitLost();
15131*b7893ccfSSadaf Ebrahimi }
15132*b7893ccfSSadaf Ebrahimi
15133*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15134*b7893ccfSSadaf Ebrahimi {
15135*b7893ccfSSadaf Ebrahimi const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15136*b7893ccfSSadaf Ebrahimi
15137*b7893ccfSSadaf Ebrahimi VkResult res;
15138*b7893ccfSSadaf Ebrahimi if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15139*b7893ccfSSadaf Ebrahimi {
15140*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15141*b7893ccfSSadaf Ebrahimi if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15142*b7893ccfSSadaf Ebrahimi {
15143*b7893ccfSSadaf Ebrahimi res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15144*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
15145*b7893ccfSSadaf Ebrahimi {
15146*b7893ccfSSadaf Ebrahimi m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15147*b7893ccfSSadaf Ebrahimi }
15148*b7893ccfSSadaf Ebrahimi }
15149*b7893ccfSSadaf Ebrahimi else
15150*b7893ccfSSadaf Ebrahimi {
15151*b7893ccfSSadaf Ebrahimi res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15152*b7893ccfSSadaf Ebrahimi }
15153*b7893ccfSSadaf Ebrahimi }
15154*b7893ccfSSadaf Ebrahimi else
15155*b7893ccfSSadaf Ebrahimi {
15156*b7893ccfSSadaf Ebrahimi res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15157*b7893ccfSSadaf Ebrahimi }
15158*b7893ccfSSadaf Ebrahimi
15159*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15160*b7893ccfSSadaf Ebrahimi {
15161*b7893ccfSSadaf Ebrahimi (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15162*b7893ccfSSadaf Ebrahimi }
15163*b7893ccfSSadaf Ebrahimi
15164*b7893ccfSSadaf Ebrahimi return res;
15165*b7893ccfSSadaf Ebrahimi }
15166*b7893ccfSSadaf Ebrahimi
15167*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15168*b7893ccfSSadaf Ebrahimi {
15169*b7893ccfSSadaf Ebrahimi if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15170*b7893ccfSSadaf Ebrahimi {
15171*b7893ccfSSadaf Ebrahimi (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15172*b7893ccfSSadaf Ebrahimi }
15173*b7893ccfSSadaf Ebrahimi
15174*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15175*b7893ccfSSadaf Ebrahimi
15176*b7893ccfSSadaf Ebrahimi const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15177*b7893ccfSSadaf Ebrahimi if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15178*b7893ccfSSadaf Ebrahimi {
15179*b7893ccfSSadaf Ebrahimi VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15180*b7893ccfSSadaf Ebrahimi m_HeapSizeLimit[heapIndex] += size;
15181*b7893ccfSSadaf Ebrahimi }
15182*b7893ccfSSadaf Ebrahimi }
15183*b7893ccfSSadaf Ebrahimi
15184*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15185*b7893ccfSSadaf Ebrahimi {
15186*b7893ccfSSadaf Ebrahimi if(hAllocation->CanBecomeLost())
15187*b7893ccfSSadaf Ebrahimi {
15188*b7893ccfSSadaf Ebrahimi return VK_ERROR_MEMORY_MAP_FAILED;
15189*b7893ccfSSadaf Ebrahimi }
15190*b7893ccfSSadaf Ebrahimi
15191*b7893ccfSSadaf Ebrahimi switch(hAllocation->GetType())
15192*b7893ccfSSadaf Ebrahimi {
15193*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15194*b7893ccfSSadaf Ebrahimi {
15195*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15196*b7893ccfSSadaf Ebrahimi char *pBytes = VMA_NULL;
15197*b7893ccfSSadaf Ebrahimi VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15198*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
15199*b7893ccfSSadaf Ebrahimi {
15200*b7893ccfSSadaf Ebrahimi *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15201*b7893ccfSSadaf Ebrahimi hAllocation->BlockAllocMap();
15202*b7893ccfSSadaf Ebrahimi }
15203*b7893ccfSSadaf Ebrahimi return res;
15204*b7893ccfSSadaf Ebrahimi }
15205*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15206*b7893ccfSSadaf Ebrahimi return hAllocation->DedicatedAllocMap(this, ppData);
15207*b7893ccfSSadaf Ebrahimi default:
15208*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15209*b7893ccfSSadaf Ebrahimi return VK_ERROR_MEMORY_MAP_FAILED;
15210*b7893ccfSSadaf Ebrahimi }
15211*b7893ccfSSadaf Ebrahimi }
15212*b7893ccfSSadaf Ebrahimi
15213*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15214*b7893ccfSSadaf Ebrahimi {
15215*b7893ccfSSadaf Ebrahimi switch(hAllocation->GetType())
15216*b7893ccfSSadaf Ebrahimi {
15217*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15218*b7893ccfSSadaf Ebrahimi {
15219*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15220*b7893ccfSSadaf Ebrahimi hAllocation->BlockAllocUnmap();
15221*b7893ccfSSadaf Ebrahimi pBlock->Unmap(this, 1);
15222*b7893ccfSSadaf Ebrahimi }
15223*b7893ccfSSadaf Ebrahimi break;
15224*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15225*b7893ccfSSadaf Ebrahimi hAllocation->DedicatedAllocUnmap(this);
15226*b7893ccfSSadaf Ebrahimi break;
15227*b7893ccfSSadaf Ebrahimi default:
15228*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15229*b7893ccfSSadaf Ebrahimi }
15230*b7893ccfSSadaf Ebrahimi }
15231*b7893ccfSSadaf Ebrahimi
15232*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15233*b7893ccfSSadaf Ebrahimi {
15234*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
15235*b7893ccfSSadaf Ebrahimi switch(hAllocation->GetType())
15236*b7893ccfSSadaf Ebrahimi {
15237*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15238*b7893ccfSSadaf Ebrahimi res = GetVulkanFunctions().vkBindBufferMemory(
15239*b7893ccfSSadaf Ebrahimi m_hDevice,
15240*b7893ccfSSadaf Ebrahimi hBuffer,
15241*b7893ccfSSadaf Ebrahimi hAllocation->GetMemory(),
15242*b7893ccfSSadaf Ebrahimi 0); //memoryOffset
15243*b7893ccfSSadaf Ebrahimi break;
15244*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15245*b7893ccfSSadaf Ebrahimi {
15246*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15247*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15248*b7893ccfSSadaf Ebrahimi res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15249*b7893ccfSSadaf Ebrahimi break;
15250*b7893ccfSSadaf Ebrahimi }
15251*b7893ccfSSadaf Ebrahimi default:
15252*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15253*b7893ccfSSadaf Ebrahimi }
15254*b7893ccfSSadaf Ebrahimi return res;
15255*b7893ccfSSadaf Ebrahimi }
15256*b7893ccfSSadaf Ebrahimi
15257*b7893ccfSSadaf Ebrahimi VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15258*b7893ccfSSadaf Ebrahimi {
15259*b7893ccfSSadaf Ebrahimi VkResult res = VK_SUCCESS;
15260*b7893ccfSSadaf Ebrahimi switch(hAllocation->GetType())
15261*b7893ccfSSadaf Ebrahimi {
15262*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15263*b7893ccfSSadaf Ebrahimi res = GetVulkanFunctions().vkBindImageMemory(
15264*b7893ccfSSadaf Ebrahimi m_hDevice,
15265*b7893ccfSSadaf Ebrahimi hImage,
15266*b7893ccfSSadaf Ebrahimi hAllocation->GetMemory(),
15267*b7893ccfSSadaf Ebrahimi 0); //memoryOffset
15268*b7893ccfSSadaf Ebrahimi break;
15269*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15270*b7893ccfSSadaf Ebrahimi {
15271*b7893ccfSSadaf Ebrahimi VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15272*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15273*b7893ccfSSadaf Ebrahimi res = pBlock->BindImageMemory(this, hAllocation, hImage);
15274*b7893ccfSSadaf Ebrahimi break;
15275*b7893ccfSSadaf Ebrahimi }
15276*b7893ccfSSadaf Ebrahimi default:
15277*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15278*b7893ccfSSadaf Ebrahimi }
15279*b7893ccfSSadaf Ebrahimi return res;
15280*b7893ccfSSadaf Ebrahimi }
15281*b7893ccfSSadaf Ebrahimi
15282*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::FlushOrInvalidateAllocation(
15283*b7893ccfSSadaf Ebrahimi VmaAllocation hAllocation,
15284*b7893ccfSSadaf Ebrahimi VkDeviceSize offset, VkDeviceSize size,
15285*b7893ccfSSadaf Ebrahimi VMA_CACHE_OPERATION op)
15286*b7893ccfSSadaf Ebrahimi {
15287*b7893ccfSSadaf Ebrahimi const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15288*b7893ccfSSadaf Ebrahimi if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15289*b7893ccfSSadaf Ebrahimi {
15290*b7893ccfSSadaf Ebrahimi const VkDeviceSize allocationSize = hAllocation->GetSize();
15291*b7893ccfSSadaf Ebrahimi VMA_ASSERT(offset <= allocationSize);
15292*b7893ccfSSadaf Ebrahimi
15293*b7893ccfSSadaf Ebrahimi const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15294*b7893ccfSSadaf Ebrahimi
15295*b7893ccfSSadaf Ebrahimi VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15296*b7893ccfSSadaf Ebrahimi memRange.memory = hAllocation->GetMemory();
15297*b7893ccfSSadaf Ebrahimi
15298*b7893ccfSSadaf Ebrahimi switch(hAllocation->GetType())
15299*b7893ccfSSadaf Ebrahimi {
15300*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15301*b7893ccfSSadaf Ebrahimi memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15302*b7893ccfSSadaf Ebrahimi if(size == VK_WHOLE_SIZE)
15303*b7893ccfSSadaf Ebrahimi {
15304*b7893ccfSSadaf Ebrahimi memRange.size = allocationSize - memRange.offset;
15305*b7893ccfSSadaf Ebrahimi }
15306*b7893ccfSSadaf Ebrahimi else
15307*b7893ccfSSadaf Ebrahimi {
15308*b7893ccfSSadaf Ebrahimi VMA_ASSERT(offset + size <= allocationSize);
15309*b7893ccfSSadaf Ebrahimi memRange.size = VMA_MIN(
15310*b7893ccfSSadaf Ebrahimi VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15311*b7893ccfSSadaf Ebrahimi allocationSize - memRange.offset);
15312*b7893ccfSSadaf Ebrahimi }
15313*b7893ccfSSadaf Ebrahimi break;
15314*b7893ccfSSadaf Ebrahimi
15315*b7893ccfSSadaf Ebrahimi case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15316*b7893ccfSSadaf Ebrahimi {
15317*b7893ccfSSadaf Ebrahimi // 1. Still within this allocation.
15318*b7893ccfSSadaf Ebrahimi memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15319*b7893ccfSSadaf Ebrahimi if(size == VK_WHOLE_SIZE)
15320*b7893ccfSSadaf Ebrahimi {
15321*b7893ccfSSadaf Ebrahimi size = allocationSize - offset;
15322*b7893ccfSSadaf Ebrahimi }
15323*b7893ccfSSadaf Ebrahimi else
15324*b7893ccfSSadaf Ebrahimi {
15325*b7893ccfSSadaf Ebrahimi VMA_ASSERT(offset + size <= allocationSize);
15326*b7893ccfSSadaf Ebrahimi }
15327*b7893ccfSSadaf Ebrahimi memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15328*b7893ccfSSadaf Ebrahimi
15329*b7893ccfSSadaf Ebrahimi // 2. Adjust to whole block.
15330*b7893ccfSSadaf Ebrahimi const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15331*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15332*b7893ccfSSadaf Ebrahimi const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15333*b7893ccfSSadaf Ebrahimi memRange.offset += allocationOffset;
15334*b7893ccfSSadaf Ebrahimi memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15335*b7893ccfSSadaf Ebrahimi
15336*b7893ccfSSadaf Ebrahimi break;
15337*b7893ccfSSadaf Ebrahimi }
15338*b7893ccfSSadaf Ebrahimi
15339*b7893ccfSSadaf Ebrahimi default:
15340*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15341*b7893ccfSSadaf Ebrahimi }
15342*b7893ccfSSadaf Ebrahimi
15343*b7893ccfSSadaf Ebrahimi switch(op)
15344*b7893ccfSSadaf Ebrahimi {
15345*b7893ccfSSadaf Ebrahimi case VMA_CACHE_FLUSH:
15346*b7893ccfSSadaf Ebrahimi (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15347*b7893ccfSSadaf Ebrahimi break;
15348*b7893ccfSSadaf Ebrahimi case VMA_CACHE_INVALIDATE:
15349*b7893ccfSSadaf Ebrahimi (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15350*b7893ccfSSadaf Ebrahimi break;
15351*b7893ccfSSadaf Ebrahimi default:
15352*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0);
15353*b7893ccfSSadaf Ebrahimi }
15354*b7893ccfSSadaf Ebrahimi }
15355*b7893ccfSSadaf Ebrahimi // else: Just ignore this call.
15356*b7893ccfSSadaf Ebrahimi }
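// Worked example for the dedicated-allocation branch above (illustrative numbers, not from
// the Vulkan spec): with nonCoherentAtomSize = 64, offset = 100, size = 30 and an allocation
// of 256 bytes, memRange.offset = VmaAlignDown(100, 64) = 64 and
// memRange.size = VMA_MIN(VmaAlignUp(30 + (100 - 64), 64), 256 - 64) = VMA_MIN(128, 192) = 128,
// i.e. the flushed/invalidated range is expanded to whole non-coherent atoms while staying
// inside the VkDeviceMemory object.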
15357*b7893ccfSSadaf Ebrahimi
15358*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15359*b7893ccfSSadaf Ebrahimi {
15360*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15361*b7893ccfSSadaf Ebrahimi
15362*b7893ccfSSadaf Ebrahimi const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15363*b7893ccfSSadaf Ebrahimi {
15364*b7893ccfSSadaf Ebrahimi VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15365*b7893ccfSSadaf Ebrahimi AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15366*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDedicatedAllocations);
15367*b7893ccfSSadaf Ebrahimi bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15368*b7893ccfSSadaf Ebrahimi VMA_ASSERT(success);
15369*b7893ccfSSadaf Ebrahimi }
15370*b7893ccfSSadaf Ebrahimi
15371*b7893ccfSSadaf Ebrahimi VkDeviceMemory hMemory = allocation->GetMemory();
15372*b7893ccfSSadaf Ebrahimi
15373*b7893ccfSSadaf Ebrahimi /*
15374*b7893ccfSSadaf Ebrahimi There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15375*b7893ccfSSadaf Ebrahimi before vkFreeMemory.
15376*b7893ccfSSadaf Ebrahimi
15377*b7893ccfSSadaf Ebrahimi if(allocation->GetMappedData() != VMA_NULL)
15378*b7893ccfSSadaf Ebrahimi {
15379*b7893ccfSSadaf Ebrahimi (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15380*b7893ccfSSadaf Ebrahimi }
15381*b7893ccfSSadaf Ebrahimi */
15382*b7893ccfSSadaf Ebrahimi
15383*b7893ccfSSadaf Ebrahimi FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15384*b7893ccfSSadaf Ebrahimi
15385*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15386*b7893ccfSSadaf Ebrahimi }
15387*b7893ccfSSadaf Ebrahimi
15388*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15389*b7893ccfSSadaf Ebrahimi {
15390*b7893ccfSSadaf Ebrahimi if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15391*b7893ccfSSadaf Ebrahimi !hAllocation->CanBecomeLost() &&
15392*b7893ccfSSadaf Ebrahimi (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15393*b7893ccfSSadaf Ebrahimi {
15394*b7893ccfSSadaf Ebrahimi void* pData = VMA_NULL;
15395*b7893ccfSSadaf Ebrahimi VkResult res = Map(hAllocation, &pData);
15396*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
15397*b7893ccfSSadaf Ebrahimi {
15398*b7893ccfSSadaf Ebrahimi memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15399*b7893ccfSSadaf Ebrahimi FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15400*b7893ccfSSadaf Ebrahimi Unmap(hAllocation);
15401*b7893ccfSSadaf Ebrahimi }
15402*b7893ccfSSadaf Ebrahimi else
15403*b7893ccfSSadaf Ebrahimi {
15404*b7893ccfSSadaf Ebrahimi VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15405*b7893ccfSSadaf Ebrahimi }
15406*b7893ccfSSadaf Ebrahimi }
15407*b7893ccfSSadaf Ebrahimi }
15408*b7893ccfSSadaf Ebrahimi
15409*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
15410*b7893ccfSSadaf Ebrahimi
15411*b7893ccfSSadaf Ebrahimi void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15412*b7893ccfSSadaf Ebrahimi {
15413*b7893ccfSSadaf Ebrahimi bool dedicatedAllocationsStarted = false;
15414*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15415*b7893ccfSSadaf Ebrahimi {
15416*b7893ccfSSadaf Ebrahimi VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15417*b7893ccfSSadaf Ebrahimi AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15418*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pDedicatedAllocVector);
15419*b7893ccfSSadaf Ebrahimi if(pDedicatedAllocVector->empty() == false)
15420*b7893ccfSSadaf Ebrahimi {
15421*b7893ccfSSadaf Ebrahimi if(dedicatedAllocationsStarted == false)
15422*b7893ccfSSadaf Ebrahimi {
15423*b7893ccfSSadaf Ebrahimi dedicatedAllocationsStarted = true;
15424*b7893ccfSSadaf Ebrahimi json.WriteString("DedicatedAllocations");
15425*b7893ccfSSadaf Ebrahimi json.BeginObject();
15426*b7893ccfSSadaf Ebrahimi }
15427*b7893ccfSSadaf Ebrahimi
15428*b7893ccfSSadaf Ebrahimi json.BeginString("Type ");
15429*b7893ccfSSadaf Ebrahimi json.ContinueString(memTypeIndex);
15430*b7893ccfSSadaf Ebrahimi json.EndString();
15431*b7893ccfSSadaf Ebrahimi
15432*b7893ccfSSadaf Ebrahimi json.BeginArray();
15433*b7893ccfSSadaf Ebrahimi
15434*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15435*b7893ccfSSadaf Ebrahimi {
15436*b7893ccfSSadaf Ebrahimi json.BeginObject(true);
15437*b7893ccfSSadaf Ebrahimi const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15438*b7893ccfSSadaf Ebrahimi hAlloc->PrintParameters(json);
15439*b7893ccfSSadaf Ebrahimi json.EndObject();
15440*b7893ccfSSadaf Ebrahimi }
15441*b7893ccfSSadaf Ebrahimi
15442*b7893ccfSSadaf Ebrahimi json.EndArray();
15443*b7893ccfSSadaf Ebrahimi }
15444*b7893ccfSSadaf Ebrahimi }
15445*b7893ccfSSadaf Ebrahimi if(dedicatedAllocationsStarted)
15446*b7893ccfSSadaf Ebrahimi {
15447*b7893ccfSSadaf Ebrahimi json.EndObject();
15448*b7893ccfSSadaf Ebrahimi }
15449*b7893ccfSSadaf Ebrahimi
15450*b7893ccfSSadaf Ebrahimi {
15451*b7893ccfSSadaf Ebrahimi bool allocationsStarted = false;
15452*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15453*b7893ccfSSadaf Ebrahimi {
15454*b7893ccfSSadaf Ebrahimi if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15455*b7893ccfSSadaf Ebrahimi {
15456*b7893ccfSSadaf Ebrahimi if(allocationsStarted == false)
15457*b7893ccfSSadaf Ebrahimi {
15458*b7893ccfSSadaf Ebrahimi allocationsStarted = true;
15459*b7893ccfSSadaf Ebrahimi json.WriteString("DefaultPools");
15460*b7893ccfSSadaf Ebrahimi json.BeginObject();
15461*b7893ccfSSadaf Ebrahimi }
15462*b7893ccfSSadaf Ebrahimi
15463*b7893ccfSSadaf Ebrahimi json.BeginString("Type ");
15464*b7893ccfSSadaf Ebrahimi json.ContinueString(memTypeIndex);
15465*b7893ccfSSadaf Ebrahimi json.EndString();
15466*b7893ccfSSadaf Ebrahimi
15467*b7893ccfSSadaf Ebrahimi m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15468*b7893ccfSSadaf Ebrahimi }
15469*b7893ccfSSadaf Ebrahimi }
15470*b7893ccfSSadaf Ebrahimi if(allocationsStarted)
15471*b7893ccfSSadaf Ebrahimi {
15472*b7893ccfSSadaf Ebrahimi json.EndObject();
15473*b7893ccfSSadaf Ebrahimi }
15474*b7893ccfSSadaf Ebrahimi }
15475*b7893ccfSSadaf Ebrahimi
15476*b7893ccfSSadaf Ebrahimi // Custom pools
15477*b7893ccfSSadaf Ebrahimi {
15478*b7893ccfSSadaf Ebrahimi VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15479*b7893ccfSSadaf Ebrahimi const size_t poolCount = m_Pools.size();
15480*b7893ccfSSadaf Ebrahimi if(poolCount > 0)
15481*b7893ccfSSadaf Ebrahimi {
15482*b7893ccfSSadaf Ebrahimi json.WriteString("Pools");
15483*b7893ccfSSadaf Ebrahimi json.BeginObject();
15484*b7893ccfSSadaf Ebrahimi for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15485*b7893ccfSSadaf Ebrahimi {
15486*b7893ccfSSadaf Ebrahimi json.BeginString();
15487*b7893ccfSSadaf Ebrahimi json.ContinueString(m_Pools[poolIndex]->GetId());
15488*b7893ccfSSadaf Ebrahimi json.EndString();
15489*b7893ccfSSadaf Ebrahimi
15490*b7893ccfSSadaf Ebrahimi m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15491*b7893ccfSSadaf Ebrahimi }
15492*b7893ccfSSadaf Ebrahimi json.EndObject();
15493*b7893ccfSSadaf Ebrahimi }
15494*b7893ccfSSadaf Ebrahimi }
15495*b7893ccfSSadaf Ebrahimi }
15496*b7893ccfSSadaf Ebrahimi
15497*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
15498*b7893ccfSSadaf Ebrahimi
15499*b7893ccfSSadaf Ebrahimi ////////////////////////////////////////////////////////////////////////////////
15500*b7893ccfSSadaf Ebrahimi // Public interface
15501*b7893ccfSSadaf Ebrahimi
15502*b7893ccfSSadaf Ebrahimi VkResult vmaCreateAllocator(
15503*b7893ccfSSadaf Ebrahimi const VmaAllocatorCreateInfo* pCreateInfo,
15504*b7893ccfSSadaf Ebrahimi VmaAllocator* pAllocator)
15505*b7893ccfSSadaf Ebrahimi {
15506*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pCreateInfo && pAllocator);
15507*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCreateAllocator");
15508*b7893ccfSSadaf Ebrahimi *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15509*b7893ccfSSadaf Ebrahimi return (*pAllocator)->Init(pCreateInfo);
15510*b7893ccfSSadaf Ebrahimi }
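/*
Illustrative usage sketch for vmaCreateAllocator and vmaDestroyAllocator below (assumes a
valid VkPhysicalDevice and VkDevice already exist; physicalDevice and device are the
VmaAllocatorCreateInfo members assumed here):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers/images and allocations ...
    vmaDestroyAllocator(allocator);
*/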
15511*b7893ccfSSadaf Ebrahimi
15512*b7893ccfSSadaf Ebrahimi void vmaDestroyAllocator(
15513*b7893ccfSSadaf Ebrahimi VmaAllocator allocator)
15514*b7893ccfSSadaf Ebrahimi {
15515*b7893ccfSSadaf Ebrahimi if(allocator != VK_NULL_HANDLE)
15516*b7893ccfSSadaf Ebrahimi {
15517*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDestroyAllocator");
15518*b7893ccfSSadaf Ebrahimi VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15519*b7893ccfSSadaf Ebrahimi vma_delete(&allocationCallbacks, allocator);
15520*b7893ccfSSadaf Ebrahimi }
15521*b7893ccfSSadaf Ebrahimi }
15522*b7893ccfSSadaf Ebrahimi
15523*b7893ccfSSadaf Ebrahimi void vmaGetPhysicalDeviceProperties(
15524*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15525*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15526*b7893ccfSSadaf Ebrahimi {
15527*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15528*b7893ccfSSadaf Ebrahimi *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15529*b7893ccfSSadaf Ebrahimi }
15530*b7893ccfSSadaf Ebrahimi
15531*b7893ccfSSadaf Ebrahimi void vmaGetMemoryProperties(
15532*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15533*b7893ccfSSadaf Ebrahimi const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15534*b7893ccfSSadaf Ebrahimi {
15535*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15536*b7893ccfSSadaf Ebrahimi *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15537*b7893ccfSSadaf Ebrahimi }
15538*b7893ccfSSadaf Ebrahimi
15539*b7893ccfSSadaf Ebrahimi void vmaGetMemoryTypeProperties(
15540*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15541*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeIndex,
15542*b7893ccfSSadaf Ebrahimi VkMemoryPropertyFlags* pFlags)
15543*b7893ccfSSadaf Ebrahimi {
15544*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pFlags);
15545*b7893ccfSSadaf Ebrahimi VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15546*b7893ccfSSadaf Ebrahimi *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15547*b7893ccfSSadaf Ebrahimi }
15548*b7893ccfSSadaf Ebrahimi
15549*b7893ccfSSadaf Ebrahimi void vmaSetCurrentFrameIndex(
15550*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15551*b7893ccfSSadaf Ebrahimi uint32_t frameIndex)
15552*b7893ccfSSadaf Ebrahimi {
15553*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
15554*b7893ccfSSadaf Ebrahimi VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15555*b7893ccfSSadaf Ebrahimi
15556*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15557*b7893ccfSSadaf Ebrahimi
15558*b7893ccfSSadaf Ebrahimi allocator->SetCurrentFrameIndex(frameIndex);
15559*b7893ccfSSadaf Ebrahimi }
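// Illustrative sketch: this is meant to be called once per rendered frame so that the
// lost-allocation logic above (CanBecomeLost/TouchAllocation) has a current frame index to
// compare against, e.g.
//
//     static uint32_t frameIndex = 1; // must never equal VMA_FRAME_INDEX_LOST
//     vmaSetCurrentFrameIndex(allocator, frameIndex++);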
15560*b7893ccfSSadaf Ebrahimi
15561*b7893ccfSSadaf Ebrahimi void vmaCalculateStats(
15562*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15563*b7893ccfSSadaf Ebrahimi VmaStats* pStats)
15564*b7893ccfSSadaf Ebrahimi {
15565*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pStats);
15566*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15567*b7893ccfSSadaf Ebrahimi allocator->CalculateStats(pStats);
15568*b7893ccfSSadaf Ebrahimi }
15569*b7893ccfSSadaf Ebrahimi
15570*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
15571*b7893ccfSSadaf Ebrahimi
15572*b7893ccfSSadaf Ebrahimi void vmaBuildStatsString(
15573*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15574*b7893ccfSSadaf Ebrahimi char** ppStatsString,
15575*b7893ccfSSadaf Ebrahimi VkBool32 detailedMap)
15576*b7893ccfSSadaf Ebrahimi {
15577*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && ppStatsString);
15578*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15579*b7893ccfSSadaf Ebrahimi
15580*b7893ccfSSadaf Ebrahimi VmaStringBuilder sb(allocator);
15581*b7893ccfSSadaf Ebrahimi {
15582*b7893ccfSSadaf Ebrahimi VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15583*b7893ccfSSadaf Ebrahimi json.BeginObject();
15584*b7893ccfSSadaf Ebrahimi
15585*b7893ccfSSadaf Ebrahimi VmaStats stats;
15586*b7893ccfSSadaf Ebrahimi allocator->CalculateStats(&stats);
15587*b7893ccfSSadaf Ebrahimi
15588*b7893ccfSSadaf Ebrahimi json.WriteString("Total");
15589*b7893ccfSSadaf Ebrahimi VmaPrintStatInfo(json, stats.total);
15590*b7893ccfSSadaf Ebrahimi
15591*b7893ccfSSadaf Ebrahimi for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15592*b7893ccfSSadaf Ebrahimi {
15593*b7893ccfSSadaf Ebrahimi json.BeginString("Heap ");
15594*b7893ccfSSadaf Ebrahimi json.ContinueString(heapIndex);
15595*b7893ccfSSadaf Ebrahimi json.EndString();
15596*b7893ccfSSadaf Ebrahimi json.BeginObject();
15597*b7893ccfSSadaf Ebrahimi
15598*b7893ccfSSadaf Ebrahimi json.WriteString("Size");
15599*b7893ccfSSadaf Ebrahimi json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15600*b7893ccfSSadaf Ebrahimi
15601*b7893ccfSSadaf Ebrahimi json.WriteString("Flags");
15602*b7893ccfSSadaf Ebrahimi json.BeginArray(true);
15603*b7893ccfSSadaf Ebrahimi if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15604*b7893ccfSSadaf Ebrahimi {
15605*b7893ccfSSadaf Ebrahimi json.WriteString("DEVICE_LOCAL");
15606*b7893ccfSSadaf Ebrahimi }
15607*b7893ccfSSadaf Ebrahimi json.EndArray();
15608*b7893ccfSSadaf Ebrahimi
15609*b7893ccfSSadaf Ebrahimi if(stats.memoryHeap[heapIndex].blockCount > 0)
15610*b7893ccfSSadaf Ebrahimi {
15611*b7893ccfSSadaf Ebrahimi json.WriteString("Stats");
15612*b7893ccfSSadaf Ebrahimi VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15613*b7893ccfSSadaf Ebrahimi }
15614*b7893ccfSSadaf Ebrahimi
15615*b7893ccfSSadaf Ebrahimi for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15616*b7893ccfSSadaf Ebrahimi {
15617*b7893ccfSSadaf Ebrahimi if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15618*b7893ccfSSadaf Ebrahimi {
15619*b7893ccfSSadaf Ebrahimi json.BeginString("Type ");
15620*b7893ccfSSadaf Ebrahimi json.ContinueString(typeIndex);
15621*b7893ccfSSadaf Ebrahimi json.EndString();
15622*b7893ccfSSadaf Ebrahimi
15623*b7893ccfSSadaf Ebrahimi json.BeginObject();
15624*b7893ccfSSadaf Ebrahimi
15625*b7893ccfSSadaf Ebrahimi json.WriteString("Flags");
15626*b7893ccfSSadaf Ebrahimi json.BeginArray(true);
15627*b7893ccfSSadaf Ebrahimi VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15628*b7893ccfSSadaf Ebrahimi if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15629*b7893ccfSSadaf Ebrahimi {
15630*b7893ccfSSadaf Ebrahimi json.WriteString("DEVICE_LOCAL");
15631*b7893ccfSSadaf Ebrahimi }
15632*b7893ccfSSadaf Ebrahimi if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15633*b7893ccfSSadaf Ebrahimi {
15634*b7893ccfSSadaf Ebrahimi json.WriteString("HOST_VISIBLE");
15635*b7893ccfSSadaf Ebrahimi }
15636*b7893ccfSSadaf Ebrahimi if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15637*b7893ccfSSadaf Ebrahimi {
15638*b7893ccfSSadaf Ebrahimi json.WriteString("HOST_COHERENT");
15639*b7893ccfSSadaf Ebrahimi }
15640*b7893ccfSSadaf Ebrahimi if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15641*b7893ccfSSadaf Ebrahimi {
15642*b7893ccfSSadaf Ebrahimi json.WriteString("HOST_CACHED");
15643*b7893ccfSSadaf Ebrahimi }
15644*b7893ccfSSadaf Ebrahimi if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15645*b7893ccfSSadaf Ebrahimi {
15646*b7893ccfSSadaf Ebrahimi json.WriteString("LAZILY_ALLOCATED");
15647*b7893ccfSSadaf Ebrahimi }
15648*b7893ccfSSadaf Ebrahimi json.EndArray();
15649*b7893ccfSSadaf Ebrahimi
15650*b7893ccfSSadaf Ebrahimi if(stats.memoryType[typeIndex].blockCount > 0)
15651*b7893ccfSSadaf Ebrahimi {
15652*b7893ccfSSadaf Ebrahimi json.WriteString("Stats");
15653*b7893ccfSSadaf Ebrahimi VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15654*b7893ccfSSadaf Ebrahimi }
15655*b7893ccfSSadaf Ebrahimi
15656*b7893ccfSSadaf Ebrahimi json.EndObject();
15657*b7893ccfSSadaf Ebrahimi }
15658*b7893ccfSSadaf Ebrahimi }
15659*b7893ccfSSadaf Ebrahimi
15660*b7893ccfSSadaf Ebrahimi json.EndObject();
15661*b7893ccfSSadaf Ebrahimi }
15662*b7893ccfSSadaf Ebrahimi if(detailedMap == VK_TRUE)
15663*b7893ccfSSadaf Ebrahimi {
15664*b7893ccfSSadaf Ebrahimi allocator->PrintDetailedMap(json);
15665*b7893ccfSSadaf Ebrahimi }
15666*b7893ccfSSadaf Ebrahimi
15667*b7893ccfSSadaf Ebrahimi json.EndObject();
15668*b7893ccfSSadaf Ebrahimi }
15669*b7893ccfSSadaf Ebrahimi
15670*b7893ccfSSadaf Ebrahimi const size_t len = sb.GetLength();
15671*b7893ccfSSadaf Ebrahimi char* const pChars = vma_new_array(allocator, char, len + 1);
15672*b7893ccfSSadaf Ebrahimi if(len > 0)
15673*b7893ccfSSadaf Ebrahimi {
15674*b7893ccfSSadaf Ebrahimi memcpy(pChars, sb.GetData(), len);
15675*b7893ccfSSadaf Ebrahimi }
15676*b7893ccfSSadaf Ebrahimi pChars[len] = '\0';
15677*b7893ccfSSadaf Ebrahimi *ppStatsString = pChars;
15678*b7893ccfSSadaf Ebrahimi }
15679*b7893ccfSSadaf Ebrahimi
15680*b7893ccfSSadaf Ebrahimi void vmaFreeStatsString(
15681*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15682*b7893ccfSSadaf Ebrahimi char* pStatsString)
15683*b7893ccfSSadaf Ebrahimi {
15684*b7893ccfSSadaf Ebrahimi if(pStatsString != VMA_NULL)
15685*b7893ccfSSadaf Ebrahimi {
15686*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
15687*b7893ccfSSadaf Ebrahimi size_t len = strlen(pStatsString);
15688*b7893ccfSSadaf Ebrahimi vma_delete_array(allocator, pStatsString, len + 1);
15689*b7893ccfSSadaf Ebrahimi }
15690*b7893ccfSSadaf Ebrahimi }
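/*
Illustrative round trip for the two functions above (only available when
VMA_STATS_STRING_ENABLED is nonzero; printf is just an example sink for the JSON text):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/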
15691*b7893ccfSSadaf Ebrahimi
15692*b7893ccfSSadaf Ebrahimi #endif // #if VMA_STATS_STRING_ENABLED
15693*b7893ccfSSadaf Ebrahimi
15694*b7893ccfSSadaf Ebrahimi /*
15695*b7893ccfSSadaf Ebrahimi This function is not protected by any mutex because it just reads immutable data.
15696*b7893ccfSSadaf Ebrahimi */
15697*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndex(
15698*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15699*b7893ccfSSadaf Ebrahimi uint32_t memoryTypeBits,
15700*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
15701*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex)
15702*b7893ccfSSadaf Ebrahimi {
15703*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator != VK_NULL_HANDLE);
15704*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15705*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15706*b7893ccfSSadaf Ebrahimi
15707*b7893ccfSSadaf Ebrahimi if(pAllocationCreateInfo->memoryTypeBits != 0)
15708*b7893ccfSSadaf Ebrahimi {
15709*b7893ccfSSadaf Ebrahimi memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15710*b7893ccfSSadaf Ebrahimi }
15711*b7893ccfSSadaf Ebrahimi
15712*b7893ccfSSadaf Ebrahimi uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15713*b7893ccfSSadaf Ebrahimi uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15714*b7893ccfSSadaf Ebrahimi
15715*b7893ccfSSadaf Ebrahimi const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15716*b7893ccfSSadaf Ebrahimi if(mapped)
15717*b7893ccfSSadaf Ebrahimi {
15718*b7893ccfSSadaf Ebrahimi preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15719*b7893ccfSSadaf Ebrahimi }
15720*b7893ccfSSadaf Ebrahimi
15721*b7893ccfSSadaf Ebrahimi // Convert usage to requiredFlags and preferredFlags.
15722*b7893ccfSSadaf Ebrahimi switch(pAllocationCreateInfo->usage)
15723*b7893ccfSSadaf Ebrahimi {
15724*b7893ccfSSadaf Ebrahimi case VMA_MEMORY_USAGE_UNKNOWN:
15725*b7893ccfSSadaf Ebrahimi break;
15726*b7893ccfSSadaf Ebrahimi case VMA_MEMORY_USAGE_GPU_ONLY:
15727*b7893ccfSSadaf Ebrahimi if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15728*b7893ccfSSadaf Ebrahimi {
15729*b7893ccfSSadaf Ebrahimi preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15730*b7893ccfSSadaf Ebrahimi }
15731*b7893ccfSSadaf Ebrahimi break;
15732*b7893ccfSSadaf Ebrahimi case VMA_MEMORY_USAGE_CPU_ONLY:
15733*b7893ccfSSadaf Ebrahimi requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15734*b7893ccfSSadaf Ebrahimi break;
15735*b7893ccfSSadaf Ebrahimi case VMA_MEMORY_USAGE_CPU_TO_GPU:
15736*b7893ccfSSadaf Ebrahimi requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15737*b7893ccfSSadaf Ebrahimi if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15738*b7893ccfSSadaf Ebrahimi {
15739*b7893ccfSSadaf Ebrahimi preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15740*b7893ccfSSadaf Ebrahimi }
15741*b7893ccfSSadaf Ebrahimi break;
15742*b7893ccfSSadaf Ebrahimi case VMA_MEMORY_USAGE_GPU_TO_CPU:
15743*b7893ccfSSadaf Ebrahimi requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15744*b7893ccfSSadaf Ebrahimi preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15745*b7893ccfSSadaf Ebrahimi break;
15746*b7893ccfSSadaf Ebrahimi default:
15747*b7893ccfSSadaf Ebrahimi break;
15748*b7893ccfSSadaf Ebrahimi }
15749*b7893ccfSSadaf Ebrahimi
15750*b7893ccfSSadaf Ebrahimi *pMemoryTypeIndex = UINT32_MAX;
15751*b7893ccfSSadaf Ebrahimi uint32_t minCost = UINT32_MAX;
15752*b7893ccfSSadaf Ebrahimi for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15753*b7893ccfSSadaf Ebrahimi memTypeIndex < allocator->GetMemoryTypeCount();
15754*b7893ccfSSadaf Ebrahimi ++memTypeIndex, memTypeBit <<= 1)
15755*b7893ccfSSadaf Ebrahimi {
15756*b7893ccfSSadaf Ebrahimi // This memory type is acceptable according to the memoryTypeBits bitmask.
15757*b7893ccfSSadaf Ebrahimi if((memTypeBit & memoryTypeBits) != 0)
15758*b7893ccfSSadaf Ebrahimi {
15759*b7893ccfSSadaf Ebrahimi const VkMemoryPropertyFlags currFlags =
15760*b7893ccfSSadaf Ebrahimi allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15761*b7893ccfSSadaf Ebrahimi // This memory type contains requiredFlags.
15762*b7893ccfSSadaf Ebrahimi if((requiredFlags & ~currFlags) == 0)
15763*b7893ccfSSadaf Ebrahimi {
15764*b7893ccfSSadaf Ebrahimi // Calculate cost as number of bits from preferredFlags not present in this memory type.
15765*b7893ccfSSadaf Ebrahimi uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15766*b7893ccfSSadaf Ebrahimi // Remember memory type with lowest cost.
15767*b7893ccfSSadaf Ebrahimi if(currCost < minCost)
15768*b7893ccfSSadaf Ebrahimi {
15769*b7893ccfSSadaf Ebrahimi *pMemoryTypeIndex = memTypeIndex;
15770*b7893ccfSSadaf Ebrahimi if(currCost == 0)
15771*b7893ccfSSadaf Ebrahimi {
15772*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
15773*b7893ccfSSadaf Ebrahimi }
15774*b7893ccfSSadaf Ebrahimi minCost = currCost;
15775*b7893ccfSSadaf Ebrahimi }
15776*b7893ccfSSadaf Ebrahimi }
15777*b7893ccfSSadaf Ebrahimi }
15778*b7893ccfSSadaf Ebrahimi }
15779*b7893ccfSSadaf Ebrahimi return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15780*b7893ccfSSadaf Ebrahimi }
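/*
Illustrative sketch of the cost-based search above: picking a memory type for a staging
allocation (memReq.memoryTypeBits would normally come from vkGetBufferMemoryRequirements):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex refers to a HOST_VISIBLE | HOST_COHERENT type with the
    // fewest preferredFlags bits missing.
*/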
15781*b7893ccfSSadaf Ebrahimi
15782*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndexForBufferInfo(
15783*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15784*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo* pBufferCreateInfo,
15785*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
15786*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex)
15787*b7893ccfSSadaf Ebrahimi {
15788*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator != VK_NULL_HANDLE);
15789*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15790*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15791*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15792*b7893ccfSSadaf Ebrahimi
15793*b7893ccfSSadaf Ebrahimi const VkDevice hDev = allocator->m_hDevice;
15794*b7893ccfSSadaf Ebrahimi VkBuffer hBuffer = VK_NULL_HANDLE;
15795*b7893ccfSSadaf Ebrahimi VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15796*b7893ccfSSadaf Ebrahimi hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15797*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
15798*b7893ccfSSadaf Ebrahimi {
15799*b7893ccfSSadaf Ebrahimi VkMemoryRequirements memReq = {};
15800*b7893ccfSSadaf Ebrahimi allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15801*b7893ccfSSadaf Ebrahimi hDev, hBuffer, &memReq);
15802*b7893ccfSSadaf Ebrahimi
15803*b7893ccfSSadaf Ebrahimi res = vmaFindMemoryTypeIndex(
15804*b7893ccfSSadaf Ebrahimi allocator,
15805*b7893ccfSSadaf Ebrahimi memReq.memoryTypeBits,
15806*b7893ccfSSadaf Ebrahimi pAllocationCreateInfo,
15807*b7893ccfSSadaf Ebrahimi pMemoryTypeIndex);
15808*b7893ccfSSadaf Ebrahimi
15809*b7893ccfSSadaf Ebrahimi allocator->GetVulkanFunctions().vkDestroyBuffer(
15810*b7893ccfSSadaf Ebrahimi hDev, hBuffer, allocator->GetAllocationCallbacks());
15811*b7893ccfSSadaf Ebrahimi }
15812*b7893ccfSSadaf Ebrahimi return res;
15813*b7893ccfSSadaf Ebrahimi }
15814*b7893ccfSSadaf Ebrahimi
15815*b7893ccfSSadaf Ebrahimi VkResult vmaFindMemoryTypeIndexForImageInfo(
15816*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15817*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo* pImageCreateInfo,
15818*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
15819*b7893ccfSSadaf Ebrahimi uint32_t* pMemoryTypeIndex)
15820*b7893ccfSSadaf Ebrahimi {
15821*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator != VK_NULL_HANDLE);
15822*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15823*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15824*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15825*b7893ccfSSadaf Ebrahimi
15826*b7893ccfSSadaf Ebrahimi const VkDevice hDev = allocator->m_hDevice;
15827*b7893ccfSSadaf Ebrahimi VkImage hImage = VK_NULL_HANDLE;
15828*b7893ccfSSadaf Ebrahimi VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15829*b7893ccfSSadaf Ebrahimi hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15830*b7893ccfSSadaf Ebrahimi if(res == VK_SUCCESS)
15831*b7893ccfSSadaf Ebrahimi {
15832*b7893ccfSSadaf Ebrahimi VkMemoryRequirements memReq = {};
15833*b7893ccfSSadaf Ebrahimi allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15834*b7893ccfSSadaf Ebrahimi hDev, hImage, &memReq);
15835*b7893ccfSSadaf Ebrahimi
15836*b7893ccfSSadaf Ebrahimi res = vmaFindMemoryTypeIndex(
15837*b7893ccfSSadaf Ebrahimi allocator,
15838*b7893ccfSSadaf Ebrahimi memReq.memoryTypeBits,
15839*b7893ccfSSadaf Ebrahimi pAllocationCreateInfo,
15840*b7893ccfSSadaf Ebrahimi pMemoryTypeIndex);
15841*b7893ccfSSadaf Ebrahimi
15842*b7893ccfSSadaf Ebrahimi allocator->GetVulkanFunctions().vkDestroyImage(
15843*b7893ccfSSadaf Ebrahimi hDev, hImage, allocator->GetAllocationCallbacks());
15844*b7893ccfSSadaf Ebrahimi }
15845*b7893ccfSSadaf Ebrahimi return res;
15846*b7893ccfSSadaf Ebrahimi }
15847*b7893ccfSSadaf Ebrahimi
15848*b7893ccfSSadaf Ebrahimi VkResult vmaCreatePool(
15849*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15850*b7893ccfSSadaf Ebrahimi const VmaPoolCreateInfo* pCreateInfo,
15851*b7893ccfSSadaf Ebrahimi VmaPool* pPool)
15852*b7893ccfSSadaf Ebrahimi {
15853*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pCreateInfo && pPool);
15854*b7893ccfSSadaf Ebrahimi
15855*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCreatePool");
15856*b7893ccfSSadaf Ebrahimi
15857*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15858*b7893ccfSSadaf Ebrahimi
15859*b7893ccfSSadaf Ebrahimi VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15860*b7893ccfSSadaf Ebrahimi
15861*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
15862*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
15863*b7893ccfSSadaf Ebrahimi {
15864*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15865*b7893ccfSSadaf Ebrahimi }
15866*b7893ccfSSadaf Ebrahimi #endif
15867*b7893ccfSSadaf Ebrahimi
15868*b7893ccfSSadaf Ebrahimi return res;
15869*b7893ccfSSadaf Ebrahimi }
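/*
Illustrative sketch of creating and destroying a custom pool (memoryTypeIndex obtained e.g.
via vmaFindMemoryTypeIndex above; blockSize is an assumed VmaPoolCreateInfo member and the
sizes/counts are example values only):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 8;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from the pool ...
    vmaDestroyPool(allocator, pool);
*/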
15870*b7893ccfSSadaf Ebrahimi
15871*b7893ccfSSadaf Ebrahimi void vmaDestroyPool(
15872*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15873*b7893ccfSSadaf Ebrahimi VmaPool pool)
15874*b7893ccfSSadaf Ebrahimi {
15875*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
15876*b7893ccfSSadaf Ebrahimi
15877*b7893ccfSSadaf Ebrahimi if(pool == VK_NULL_HANDLE)
15878*b7893ccfSSadaf Ebrahimi {
15879*b7893ccfSSadaf Ebrahimi return;
15880*b7893ccfSSadaf Ebrahimi }
15881*b7893ccfSSadaf Ebrahimi
15882*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDestroyPool");
15883*b7893ccfSSadaf Ebrahimi
15884*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15885*b7893ccfSSadaf Ebrahimi
15886*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
15887*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
15888*b7893ccfSSadaf Ebrahimi {
15889*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15890*b7893ccfSSadaf Ebrahimi }
15891*b7893ccfSSadaf Ebrahimi #endif
15892*b7893ccfSSadaf Ebrahimi
15893*b7893ccfSSadaf Ebrahimi allocator->DestroyPool(pool);
15894*b7893ccfSSadaf Ebrahimi }
15895*b7893ccfSSadaf Ebrahimi
15896*b7893ccfSSadaf Ebrahimi void vmaGetPoolStats(
15897*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15898*b7893ccfSSadaf Ebrahimi VmaPool pool,
15899*b7893ccfSSadaf Ebrahimi VmaPoolStats* pPoolStats)
15900*b7893ccfSSadaf Ebrahimi {
15901*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pool && pPoolStats);
15902*b7893ccfSSadaf Ebrahimi
15903*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15904*b7893ccfSSadaf Ebrahimi
15905*b7893ccfSSadaf Ebrahimi allocator->GetPoolStats(pool, pPoolStats);
15906*b7893ccfSSadaf Ebrahimi }
15907*b7893ccfSSadaf Ebrahimi
15908*b7893ccfSSadaf Ebrahimi void vmaMakePoolAllocationsLost(
15909*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15910*b7893ccfSSadaf Ebrahimi VmaPool pool,
15911*b7893ccfSSadaf Ebrahimi size_t* pLostAllocationCount)
15912*b7893ccfSSadaf Ebrahimi {
15913*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pool);
15914*b7893ccfSSadaf Ebrahimi
15915*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15916*b7893ccfSSadaf Ebrahimi
15917*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
15918*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
15919*b7893ccfSSadaf Ebrahimi {
15920*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15921*b7893ccfSSadaf Ebrahimi }
15922*b7893ccfSSadaf Ebrahimi #endif
15923*b7893ccfSSadaf Ebrahimi
15924*b7893ccfSSadaf Ebrahimi allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15925*b7893ccfSSadaf Ebrahimi }
15926*b7893ccfSSadaf Ebrahimi
15927*b7893ccfSSadaf Ebrahimi VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15928*b7893ccfSSadaf Ebrahimi {
15929*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pool);
15930*b7893ccfSSadaf Ebrahimi
15931*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15932*b7893ccfSSadaf Ebrahimi
15933*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15934*b7893ccfSSadaf Ebrahimi
15935*b7893ccfSSadaf Ebrahimi return allocator->CheckPoolCorruption(pool);
15936*b7893ccfSSadaf Ebrahimi }
15937*b7893ccfSSadaf Ebrahimi
15938*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemory(
15939*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15940*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements* pVkMemoryRequirements,
15941*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
15942*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
15943*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
15944*b7893ccfSSadaf Ebrahimi {
15945*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15946*b7893ccfSSadaf Ebrahimi
15947*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaAllocateMemory");
15948*b7893ccfSSadaf Ebrahimi
15949*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15950*b7893ccfSSadaf Ebrahimi
15951*b7893ccfSSadaf Ebrahimi VkResult result = allocator->AllocateMemory(
15952*b7893ccfSSadaf Ebrahimi *pVkMemoryRequirements,
15953*b7893ccfSSadaf Ebrahimi false, // requiresDedicatedAllocation
15954*b7893ccfSSadaf Ebrahimi false, // prefersDedicatedAllocation
15955*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedBuffer
15956*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedImage
15957*b7893ccfSSadaf Ebrahimi *pCreateInfo,
15958*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_UNKNOWN,
15959*b7893ccfSSadaf Ebrahimi 1, // allocationCount
15960*b7893ccfSSadaf Ebrahimi pAllocation);
15961*b7893ccfSSadaf Ebrahimi
15962*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
15963*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
15964*b7893ccfSSadaf Ebrahimi {
15965*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordAllocateMemory(
15966*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
15967*b7893ccfSSadaf Ebrahimi *pVkMemoryRequirements,
15968*b7893ccfSSadaf Ebrahimi *pCreateInfo,
15969*b7893ccfSSadaf Ebrahimi *pAllocation);
15970*b7893ccfSSadaf Ebrahimi }
15971*b7893ccfSSadaf Ebrahimi #endif
15972*b7893ccfSSadaf Ebrahimi
15973*b7893ccfSSadaf Ebrahimi if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15974*b7893ccfSSadaf Ebrahimi {
15975*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15976*b7893ccfSSadaf Ebrahimi }
15977*b7893ccfSSadaf Ebrahimi
15978*b7893ccfSSadaf Ebrahimi return result;
15979*b7893ccfSSadaf Ebrahimi }
15980*b7893ccfSSadaf Ebrahimi
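// Illustrative caller-side sketch (assumed: `allocator` and a VkMemoryRequirements
// struct `memReq` obtained elsewhere, e.g. from vkGetBufferMemoryRequirements):
// allocating "raw" memory with vmaAllocateMemory(). Not compiled; example only.
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
if(res == VK_SUCCESS)
{
    // allocInfo.deviceMemory and allocInfo.offset describe the VkDeviceMemory range backing it.
    // ...
    vmaFreeMemory(allocator, allocation);
}
#endif
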
15981*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryPages(
15982*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
15983*b7893ccfSSadaf Ebrahimi const VkMemoryRequirements* pVkMemoryRequirements,
15984*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
15985*b7893ccfSSadaf Ebrahimi size_t allocationCount,
15986*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
15987*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
15988*b7893ccfSSadaf Ebrahimi {
15989*b7893ccfSSadaf Ebrahimi if(allocationCount == 0)
15990*b7893ccfSSadaf Ebrahimi {
15991*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
15992*b7893ccfSSadaf Ebrahimi }
15993*b7893ccfSSadaf Ebrahimi
15994*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15995*b7893ccfSSadaf Ebrahimi
15996*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15997*b7893ccfSSadaf Ebrahimi
15998*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
15999*b7893ccfSSadaf Ebrahimi
16000*b7893ccfSSadaf Ebrahimi VkResult result = allocator->AllocateMemory(
16001*b7893ccfSSadaf Ebrahimi *pVkMemoryRequirements,
16002*b7893ccfSSadaf Ebrahimi false, // requiresDedicatedAllocation
16003*b7893ccfSSadaf Ebrahimi false, // prefersDedicatedAllocation
16004*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedBuffer
16005*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedImage
16006*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16007*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_UNKNOWN,
16008*b7893ccfSSadaf Ebrahimi allocationCount,
16009*b7893ccfSSadaf Ebrahimi pAllocations);
16010*b7893ccfSSadaf Ebrahimi
16011*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16012*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16013*b7893ccfSSadaf Ebrahimi {
16014*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordAllocateMemoryPages(
16015*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16016*b7893ccfSSadaf Ebrahimi *pVkMemoryRequirements,
16017*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16018*b7893ccfSSadaf Ebrahimi (uint64_t)allocationCount,
16019*b7893ccfSSadaf Ebrahimi pAllocations);
16020*b7893ccfSSadaf Ebrahimi }
16021*b7893ccfSSadaf Ebrahimi #endif
16022*b7893ccfSSadaf Ebrahimi
16023*b7893ccfSSadaf Ebrahimi if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16024*b7893ccfSSadaf Ebrahimi {
16025*b7893ccfSSadaf Ebrahimi for(size_t i = 0; i < allocationCount; ++i)
16026*b7893ccfSSadaf Ebrahimi {
16027*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16028*b7893ccfSSadaf Ebrahimi }
16029*b7893ccfSSadaf Ebrahimi }
16030*b7893ccfSSadaf Ebrahimi
16031*b7893ccfSSadaf Ebrahimi return result;
16032*b7893ccfSSadaf Ebrahimi }
16033*b7893ccfSSadaf Ebrahimi
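// Illustrative caller-side sketch (assumed: `allocator`, `memReq` as above): allocating
// several allocations with identical requirements in one vmaAllocateMemoryPages() call,
// then releasing them together with vmaFreeMemoryPages(). Not compiled; example only.
#if 0
enum { PAGE_COUNT = 8 };
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocations[PAGE_COUNT] = {};
VmaAllocationInfo allocInfos[PAGE_COUNT] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo,
    PAGE_COUNT, allocations, allocInfos);
if(res == VK_SUCCESS)
{
    // ... use the allocations ...
    vmaFreeMemoryPages(allocator, PAGE_COUNT, allocations);
}
#endif
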
16034*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryForBuffer(
16035*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16036*b7893ccfSSadaf Ebrahimi VkBuffer buffer,
16037*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
16038*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
16039*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
16040*b7893ccfSSadaf Ebrahimi {
16041*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16042*b7893ccfSSadaf Ebrahimi
16043*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16044*b7893ccfSSadaf Ebrahimi
16045*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16046*b7893ccfSSadaf Ebrahimi
16047*b7893ccfSSadaf Ebrahimi VkMemoryRequirements vkMemReq = {};
16048*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation = false;
16049*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation = false;
16050*b7893ccfSSadaf Ebrahimi allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16051*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16052*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation);
16053*b7893ccfSSadaf Ebrahimi
16054*b7893ccfSSadaf Ebrahimi VkResult result = allocator->AllocateMemory(
16055*b7893ccfSSadaf Ebrahimi vkMemReq,
16056*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16057*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16058*b7893ccfSSadaf Ebrahimi buffer, // dedicatedBuffer
16059*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedImage
16060*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16061*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_BUFFER,
16062*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16063*b7893ccfSSadaf Ebrahimi pAllocation);
16064*b7893ccfSSadaf Ebrahimi
16065*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16066*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16067*b7893ccfSSadaf Ebrahimi {
16068*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16069*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16070*b7893ccfSSadaf Ebrahimi vkMemReq,
16071*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16072*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16073*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16074*b7893ccfSSadaf Ebrahimi *pAllocation);
16075*b7893ccfSSadaf Ebrahimi }
16076*b7893ccfSSadaf Ebrahimi #endif
16077*b7893ccfSSadaf Ebrahimi
16078*b7893ccfSSadaf Ebrahimi if(pAllocationInfo && result == VK_SUCCESS)
16079*b7893ccfSSadaf Ebrahimi {
16080*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16081*b7893ccfSSadaf Ebrahimi }
16082*b7893ccfSSadaf Ebrahimi
16083*b7893ccfSSadaf Ebrahimi return result;
16084*b7893ccfSSadaf Ebrahimi }
16085*b7893ccfSSadaf Ebrahimi
16086*b7893ccfSSadaf Ebrahimi VkResult vmaAllocateMemoryForImage(
16087*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16088*b7893ccfSSadaf Ebrahimi VkImage image,
16089*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pCreateInfo,
16090*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
16091*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
16092*b7893ccfSSadaf Ebrahimi {
16093*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16094*b7893ccfSSadaf Ebrahimi
16095*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16096*b7893ccfSSadaf Ebrahimi
16097*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16098*b7893ccfSSadaf Ebrahimi
16099*b7893ccfSSadaf Ebrahimi VkMemoryRequirements vkMemReq = {};
16100*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation = false;
16101*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation = false;
16102*b7893ccfSSadaf Ebrahimi allocator->GetImageMemoryRequirements(image, vkMemReq,
16103*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation, prefersDedicatedAllocation);
16104*b7893ccfSSadaf Ebrahimi
16105*b7893ccfSSadaf Ebrahimi VkResult result = allocator->AllocateMemory(
16106*b7893ccfSSadaf Ebrahimi vkMemReq,
16107*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16108*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16109*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedBuffer
16110*b7893ccfSSadaf Ebrahimi image, // dedicatedImage
16111*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16112*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16113*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16114*b7893ccfSSadaf Ebrahimi pAllocation);
16115*b7893ccfSSadaf Ebrahimi
16116*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16117*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16118*b7893ccfSSadaf Ebrahimi {
16119*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordAllocateMemoryForImage(
16120*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16121*b7893ccfSSadaf Ebrahimi vkMemReq,
16122*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16123*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16124*b7893ccfSSadaf Ebrahimi *pCreateInfo,
16125*b7893ccfSSadaf Ebrahimi *pAllocation);
16126*b7893ccfSSadaf Ebrahimi }
16127*b7893ccfSSadaf Ebrahimi #endif
16128*b7893ccfSSadaf Ebrahimi
16129*b7893ccfSSadaf Ebrahimi if(pAllocationInfo && result == VK_SUCCESS)
16130*b7893ccfSSadaf Ebrahimi {
16131*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16132*b7893ccfSSadaf Ebrahimi }
16133*b7893ccfSSadaf Ebrahimi
16134*b7893ccfSSadaf Ebrahimi return result;
16135*b7893ccfSSadaf Ebrahimi }
16136*b7893ccfSSadaf Ebrahimi
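// Illustrative caller-side sketch (assumed: `allocator` and an already created VkBuffer
// `buffer`): allocating memory for an existing buffer and binding it afterwards with
// vmaBindBufferMemory(). vmaCreateBuffer() below performs all of these steps in one call.
// Not compiled; example only.
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, allocation, buffer);
}
#endif
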
16137*b7893ccfSSadaf Ebrahimi void vmaFreeMemory(
16138*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16139*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
16140*b7893ccfSSadaf Ebrahimi {
16141*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16142*b7893ccfSSadaf Ebrahimi
16143*b7893ccfSSadaf Ebrahimi if(allocation == VK_NULL_HANDLE)
16144*b7893ccfSSadaf Ebrahimi {
16145*b7893ccfSSadaf Ebrahimi return;
16146*b7893ccfSSadaf Ebrahimi }
16147*b7893ccfSSadaf Ebrahimi
16148*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaFreeMemory");
16149*b7893ccfSSadaf Ebrahimi
16150*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16151*b7893ccfSSadaf Ebrahimi
16152*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16153*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16154*b7893ccfSSadaf Ebrahimi {
16155*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordFreeMemory(
16156*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16157*b7893ccfSSadaf Ebrahimi allocation);
16158*b7893ccfSSadaf Ebrahimi }
16159*b7893ccfSSadaf Ebrahimi #endif
16160*b7893ccfSSadaf Ebrahimi
16161*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(
16162*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16163*b7893ccfSSadaf Ebrahimi &allocation);
16164*b7893ccfSSadaf Ebrahimi }
16165*b7893ccfSSadaf Ebrahimi
16166*b7893ccfSSadaf Ebrahimi void vmaFreeMemoryPages(
16167*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16168*b7893ccfSSadaf Ebrahimi size_t allocationCount,
16169*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations)
16170*b7893ccfSSadaf Ebrahimi {
16171*b7893ccfSSadaf Ebrahimi if(allocationCount == 0)
16172*b7893ccfSSadaf Ebrahimi {
16173*b7893ccfSSadaf Ebrahimi return;
16174*b7893ccfSSadaf Ebrahimi }
16175*b7893ccfSSadaf Ebrahimi
16176*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16177*b7893ccfSSadaf Ebrahimi
16178*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaFreeMemoryPages");
16179*b7893ccfSSadaf Ebrahimi
16180*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16181*b7893ccfSSadaf Ebrahimi
16182*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16183*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16184*b7893ccfSSadaf Ebrahimi {
16185*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordFreeMemoryPages(
16186*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16187*b7893ccfSSadaf Ebrahimi (uint64_t)allocationCount,
16188*b7893ccfSSadaf Ebrahimi pAllocations);
16189*b7893ccfSSadaf Ebrahimi }
16190*b7893ccfSSadaf Ebrahimi #endif
16191*b7893ccfSSadaf Ebrahimi
16192*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(allocationCount, pAllocations);
16193*b7893ccfSSadaf Ebrahimi }
16194*b7893ccfSSadaf Ebrahimi
16195*b7893ccfSSadaf Ebrahimi VkResult vmaResizeAllocation(
16196*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16197*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16198*b7893ccfSSadaf Ebrahimi VkDeviceSize newSize)
16199*b7893ccfSSadaf Ebrahimi {
16200*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16201*b7893ccfSSadaf Ebrahimi
16202*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaResizeAllocation");
16203*b7893ccfSSadaf Ebrahimi
16204*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16205*b7893ccfSSadaf Ebrahimi
16206*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16207*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16208*b7893ccfSSadaf Ebrahimi {
16209*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordResizeAllocation(
16210*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16211*b7893ccfSSadaf Ebrahimi allocation,
16212*b7893ccfSSadaf Ebrahimi newSize);
16213*b7893ccfSSadaf Ebrahimi }
16214*b7893ccfSSadaf Ebrahimi #endif
16215*b7893ccfSSadaf Ebrahimi
16216*b7893ccfSSadaf Ebrahimi return allocator->ResizeAllocation(allocation, newSize);
16217*b7893ccfSSadaf Ebrahimi }
16218*b7893ccfSSadaf Ebrahimi
16219*b7893ccfSSadaf Ebrahimi void vmaGetAllocationInfo(
16220*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16221*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16222*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
16223*b7893ccfSSadaf Ebrahimi {
16224*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation && pAllocationInfo);
16225*b7893ccfSSadaf Ebrahimi
16226*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16227*b7893ccfSSadaf Ebrahimi
16228*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16229*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16230*b7893ccfSSadaf Ebrahimi {
16231*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordGetAllocationInfo(
16232*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16233*b7893ccfSSadaf Ebrahimi allocation);
16234*b7893ccfSSadaf Ebrahimi }
16235*b7893ccfSSadaf Ebrahimi #endif
16236*b7893ccfSSadaf Ebrahimi
16237*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(allocation, pAllocationInfo);
16238*b7893ccfSSadaf Ebrahimi }
16239*b7893ccfSSadaf Ebrahimi
16240*b7893ccfSSadaf Ebrahimi VkBool32 vmaTouchAllocation(
16241*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16242*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
16243*b7893ccfSSadaf Ebrahimi {
16244*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16245*b7893ccfSSadaf Ebrahimi
16246*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16247*b7893ccfSSadaf Ebrahimi
16248*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16249*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16250*b7893ccfSSadaf Ebrahimi {
16251*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordTouchAllocation(
16252*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16253*b7893ccfSSadaf Ebrahimi allocation);
16254*b7893ccfSSadaf Ebrahimi }
16255*b7893ccfSSadaf Ebrahimi #endif
16256*b7893ccfSSadaf Ebrahimi
16257*b7893ccfSSadaf Ebrahimi return allocator->TouchAllocation(allocation);
16258*b7893ccfSSadaf Ebrahimi }
16259*b7893ccfSSadaf Ebrahimi
16260*b7893ccfSSadaf Ebrahimi void vmaSetAllocationUserData(
16261*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16262*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16263*b7893ccfSSadaf Ebrahimi void* pUserData)
16264*b7893ccfSSadaf Ebrahimi {
16265*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16266*b7893ccfSSadaf Ebrahimi
16267*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16268*b7893ccfSSadaf Ebrahimi
16269*b7893ccfSSadaf Ebrahimi allocation->SetUserData(allocator, pUserData);
16270*b7893ccfSSadaf Ebrahimi
16271*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16272*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16273*b7893ccfSSadaf Ebrahimi {
16274*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordSetAllocationUserData(
16275*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16276*b7893ccfSSadaf Ebrahimi allocation,
16277*b7893ccfSSadaf Ebrahimi pUserData);
16278*b7893ccfSSadaf Ebrahimi }
16279*b7893ccfSSadaf Ebrahimi #endif
16280*b7893ccfSSadaf Ebrahimi }
16281*b7893ccfSSadaf Ebrahimi
16282*b7893ccfSSadaf Ebrahimi void vmaCreateLostAllocation(
16283*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16284*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation)
16285*b7893ccfSSadaf Ebrahimi {
16286*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pAllocation);
16287*b7893ccfSSadaf Ebrahimi
16288*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16289*b7893ccfSSadaf Ebrahimi
16290*b7893ccfSSadaf Ebrahimi allocator->CreateLostAllocation(pAllocation);
16291*b7893ccfSSadaf Ebrahimi
16292*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16293*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16294*b7893ccfSSadaf Ebrahimi {
16295*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordCreateLostAllocation(
16296*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16297*b7893ccfSSadaf Ebrahimi *pAllocation);
16298*b7893ccfSSadaf Ebrahimi }
16299*b7893ccfSSadaf Ebrahimi #endif
16300*b7893ccfSSadaf Ebrahimi }
16301*b7893ccfSSadaf Ebrahimi
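// Illustrative caller-side sketch (assumed: `allocator` and `allocation` created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, and vmaSetCurrentFrameIndex() called once
// per frame elsewhere): checking for lost allocations with vmaTouchAllocation() and
// creating a placeholder lost allocation. Not compiled; example only.
#if 0
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation was lost - the resource it backed must be recreated.
}

// An allocation that is permanently in the lost state, usable as a placeholder:
VmaAllocation lostAlloc = VK_NULL_HANDLE;
vmaCreateLostAllocation(allocator, &lostAlloc);
#endif
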
16302*b7893ccfSSadaf Ebrahimi VkResult vmaMapMemory(
16303*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16304*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16305*b7893ccfSSadaf Ebrahimi void** ppData)
16306*b7893ccfSSadaf Ebrahimi {
16307*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation && ppData);
16308*b7893ccfSSadaf Ebrahimi
16309*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16310*b7893ccfSSadaf Ebrahimi
16311*b7893ccfSSadaf Ebrahimi VkResult res = allocator->Map(allocation, ppData);
16312*b7893ccfSSadaf Ebrahimi
16313*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16314*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16315*b7893ccfSSadaf Ebrahimi {
16316*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordMapMemory(
16317*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16318*b7893ccfSSadaf Ebrahimi allocation);
16319*b7893ccfSSadaf Ebrahimi }
16320*b7893ccfSSadaf Ebrahimi #endif
16321*b7893ccfSSadaf Ebrahimi
16322*b7893ccfSSadaf Ebrahimi return res;
16323*b7893ccfSSadaf Ebrahimi }
16324*b7893ccfSSadaf Ebrahimi
16325*b7893ccfSSadaf Ebrahimi void vmaUnmapMemory(
16326*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16327*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
16328*b7893ccfSSadaf Ebrahimi {
16329*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16330*b7893ccfSSadaf Ebrahimi
16331*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16332*b7893ccfSSadaf Ebrahimi
16333*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16334*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16335*b7893ccfSSadaf Ebrahimi {
16336*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordUnmapMemory(
16337*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16338*b7893ccfSSadaf Ebrahimi allocation);
16339*b7893ccfSSadaf Ebrahimi }
16340*b7893ccfSSadaf Ebrahimi #endif
16341*b7893ccfSSadaf Ebrahimi
16342*b7893ccfSSadaf Ebrahimi allocator->Unmap(allocation);
16343*b7893ccfSSadaf Ebrahimi }
16344*b7893ccfSSadaf Ebrahimi
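// Illustrative caller-side sketch (assumed: `allocator`, a host-visible `allocation`,
// plus `srcData`/`dataSize`): temporarily mapping memory to upload data. Mapping is
// reference-counted, so nested vmaMapMemory()/vmaUnmapMemory() pairs are allowed.
// Not compiled; example only.
#if 0
void* mapped = NULL;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)dataSize);
    vmaUnmapMemory(allocator, allocation);
}
#endif
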
16345*b7893ccfSSadaf Ebrahimi void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16346*b7893ccfSSadaf Ebrahimi {
16347*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16348*b7893ccfSSadaf Ebrahimi
16349*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaFlushAllocation");
16350*b7893ccfSSadaf Ebrahimi
16351*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16352*b7893ccfSSadaf Ebrahimi
16353*b7893ccfSSadaf Ebrahimi allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16354*b7893ccfSSadaf Ebrahimi
16355*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16356*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16357*b7893ccfSSadaf Ebrahimi {
16358*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordFlushAllocation(
16359*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16360*b7893ccfSSadaf Ebrahimi allocation, offset, size);
16361*b7893ccfSSadaf Ebrahimi }
16362*b7893ccfSSadaf Ebrahimi #endif
16363*b7893ccfSSadaf Ebrahimi }
16364*b7893ccfSSadaf Ebrahimi
16365*b7893ccfSSadaf Ebrahimi void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16366*b7893ccfSSadaf Ebrahimi {
16367*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation);
16368*b7893ccfSSadaf Ebrahimi
16369*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaInvalidateAllocation");
16370*b7893ccfSSadaf Ebrahimi
16371*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16372*b7893ccfSSadaf Ebrahimi
16373*b7893ccfSSadaf Ebrahimi allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16374*b7893ccfSSadaf Ebrahimi
16375*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16376*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16377*b7893ccfSSadaf Ebrahimi {
16378*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordInvalidateAllocation(
16379*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16380*b7893ccfSSadaf Ebrahimi allocation, offset, size);
16381*b7893ccfSSadaf Ebrahimi }
16382*b7893ccfSSadaf Ebrahimi #endif
16383*b7893ccfSSadaf Ebrahimi }
16384*b7893ccfSSadaf Ebrahimi
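// Illustrative caller-side sketch (assumed: `allocator`, `allocation` on a memory type
// that may lack VK_MEMORY_PROPERTY_HOST_COHERENT_BIT): flushing after a CPU write and
// invalidating before a CPU read. Passing VK_WHOLE_SIZE covers the allocation from
// `offset` to its end. Not compiled; example only.
#if 0
// After writing through a mapped pointer:
vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);

// Before reading data the GPU wrote:
vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
#endif
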
16385*b7893ccfSSadaf Ebrahimi VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16386*b7893ccfSSadaf Ebrahimi {
16387*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16388*b7893ccfSSadaf Ebrahimi
16389*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCheckCorruption");
16390*b7893ccfSSadaf Ebrahimi
16391*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16392*b7893ccfSSadaf Ebrahimi
16393*b7893ccfSSadaf Ebrahimi return allocator->CheckCorruption(memoryTypeBits);
16394*b7893ccfSSadaf Ebrahimi }
16395*b7893ccfSSadaf Ebrahimi
16396*b7893ccfSSadaf Ebrahimi VkResult vmaDefragment(
16397*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16398*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocations,
16399*b7893ccfSSadaf Ebrahimi size_t allocationCount,
16400*b7893ccfSSadaf Ebrahimi VkBool32* pAllocationsChanged,
16401*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo *pDefragmentationInfo,
16402*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pDefragmentationStats)
16403*b7893ccfSSadaf Ebrahimi {
16404*b7893ccfSSadaf Ebrahimi // Deprecated interface, reimplemented using new one.
16405*b7893ccfSSadaf Ebrahimi
16406*b7893ccfSSadaf Ebrahimi VmaDefragmentationInfo2 info2 = {};
16407*b7893ccfSSadaf Ebrahimi info2.allocationCount = (uint32_t)allocationCount;
16408*b7893ccfSSadaf Ebrahimi info2.pAllocations = pAllocations;
16409*b7893ccfSSadaf Ebrahimi info2.pAllocationsChanged = pAllocationsChanged;
16410*b7893ccfSSadaf Ebrahimi if(pDefragmentationInfo != VMA_NULL)
16411*b7893ccfSSadaf Ebrahimi {
16412*b7893ccfSSadaf Ebrahimi info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16413*b7893ccfSSadaf Ebrahimi info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16414*b7893ccfSSadaf Ebrahimi }
16415*b7893ccfSSadaf Ebrahimi else
16416*b7893ccfSSadaf Ebrahimi {
16417*b7893ccfSSadaf Ebrahimi info2.maxCpuAllocationsToMove = UINT32_MAX;
16418*b7893ccfSSadaf Ebrahimi info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16419*b7893ccfSSadaf Ebrahimi }
16420*b7893ccfSSadaf Ebrahimi // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16421*b7893ccfSSadaf Ebrahimi
16422*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext ctx;
16423*b7893ccfSSadaf Ebrahimi VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16424*b7893ccfSSadaf Ebrahimi if(res == VK_NOT_READY)
16425*b7893ccfSSadaf Ebrahimi {
16426*b7893ccfSSadaf Ebrahimi res = vmaDefragmentationEnd(allocator, ctx);
16427*b7893ccfSSadaf Ebrahimi }
16428*b7893ccfSSadaf Ebrahimi return res;
16429*b7893ccfSSadaf Ebrahimi }
16430*b7893ccfSSadaf Ebrahimi
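// Illustrative caller-side sketch (assumed: `allocator`, an array `allocs` of
// `allocCount` allocations): CPU-side defragmentation through the newer
// vmaDefragmentationBegin()/vmaDefragmentationEnd() pair that the deprecated
// vmaDefragment() above wraps. Not compiled; example only.
#if 0
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;      // no byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;   // no allocation-count limit

VmaDefragmentationStats stats = {};
VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to allocations that moved must be destroyed, recreated and re-bound by the caller.
#endif
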
16431*b7893ccfSSadaf Ebrahimi VkResult vmaDefragmentationBegin(
16432*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16433*b7893ccfSSadaf Ebrahimi const VmaDefragmentationInfo2* pInfo,
16434*b7893ccfSSadaf Ebrahimi VmaDefragmentationStats* pStats,
16435*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext *pContext)
16436*b7893ccfSSadaf Ebrahimi {
16437*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pInfo && pContext);
16438*b7893ccfSSadaf Ebrahimi
16439*b7893ccfSSadaf Ebrahimi // Degenerate case: Nothing to defragment.
16440*b7893ccfSSadaf Ebrahimi if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16441*b7893ccfSSadaf Ebrahimi {
16442*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
16443*b7893ccfSSadaf Ebrahimi }
16444*b7893ccfSSadaf Ebrahimi
16445*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16446*b7893ccfSSadaf Ebrahimi VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16447*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16448*b7893ccfSSadaf Ebrahimi VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16449*b7893ccfSSadaf Ebrahimi
16450*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDefragmentationBegin");
16451*b7893ccfSSadaf Ebrahimi
16452*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16453*b7893ccfSSadaf Ebrahimi
16454*b7893ccfSSadaf Ebrahimi VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16455*b7893ccfSSadaf Ebrahimi
16456*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16457*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16458*b7893ccfSSadaf Ebrahimi {
16459*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordDefragmentationBegin(
16460*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16461*b7893ccfSSadaf Ebrahimi }
16462*b7893ccfSSadaf Ebrahimi #endif
16463*b7893ccfSSadaf Ebrahimi
16464*b7893ccfSSadaf Ebrahimi return res;
16465*b7893ccfSSadaf Ebrahimi }
16466*b7893ccfSSadaf Ebrahimi
16467*b7893ccfSSadaf Ebrahimi VkResult vmaDefragmentationEnd(
16468*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16469*b7893ccfSSadaf Ebrahimi VmaDefragmentationContext context)
16470*b7893ccfSSadaf Ebrahimi {
16471*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16472*b7893ccfSSadaf Ebrahimi
16473*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDefragmentationEnd");
16474*b7893ccfSSadaf Ebrahimi
16475*b7893ccfSSadaf Ebrahimi if(context != VK_NULL_HANDLE)
16476*b7893ccfSSadaf Ebrahimi {
16477*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16478*b7893ccfSSadaf Ebrahimi
16479*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16480*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16481*b7893ccfSSadaf Ebrahimi {
16482*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordDefragmentationEnd(
16483*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(), context);
16484*b7893ccfSSadaf Ebrahimi }
16485*b7893ccfSSadaf Ebrahimi #endif
16486*b7893ccfSSadaf Ebrahimi
16487*b7893ccfSSadaf Ebrahimi return allocator->DefragmentationEnd(context);
16488*b7893ccfSSadaf Ebrahimi }
16489*b7893ccfSSadaf Ebrahimi else
16490*b7893ccfSSadaf Ebrahimi {
16491*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
16492*b7893ccfSSadaf Ebrahimi }
16493*b7893ccfSSadaf Ebrahimi }
16494*b7893ccfSSadaf Ebrahimi
16495*b7893ccfSSadaf Ebrahimi VkResult vmaBindBufferMemory(
16496*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16497*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16498*b7893ccfSSadaf Ebrahimi VkBuffer buffer)
16499*b7893ccfSSadaf Ebrahimi {
16500*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation && buffer);
16501*b7893ccfSSadaf Ebrahimi
16502*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaBindBufferMemory");
16503*b7893ccfSSadaf Ebrahimi
16504*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16505*b7893ccfSSadaf Ebrahimi
16506*b7893ccfSSadaf Ebrahimi return allocator->BindBufferMemory(allocation, buffer);
16507*b7893ccfSSadaf Ebrahimi }
16508*b7893ccfSSadaf Ebrahimi
16509*b7893ccfSSadaf Ebrahimi VkResult vmaBindImageMemory(
16510*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16511*b7893ccfSSadaf Ebrahimi VmaAllocation allocation,
16512*b7893ccfSSadaf Ebrahimi VkImage image)
16513*b7893ccfSSadaf Ebrahimi {
16514*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && allocation && image);
16515*b7893ccfSSadaf Ebrahimi
16516*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaBindImageMemory");
16517*b7893ccfSSadaf Ebrahimi
16518*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16519*b7893ccfSSadaf Ebrahimi
16520*b7893ccfSSadaf Ebrahimi return allocator->BindImageMemory(allocation, image);
16521*b7893ccfSSadaf Ebrahimi }
16522*b7893ccfSSadaf Ebrahimi
16523*b7893ccfSSadaf Ebrahimi VkResult vmaCreateBuffer(
16524*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16525*b7893ccfSSadaf Ebrahimi const VkBufferCreateInfo* pBufferCreateInfo,
16526*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
16527*b7893ccfSSadaf Ebrahimi VkBuffer* pBuffer,
16528*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
16529*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
16530*b7893ccfSSadaf Ebrahimi {
16531*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16532*b7893ccfSSadaf Ebrahimi
16533*b7893ccfSSadaf Ebrahimi if(pBufferCreateInfo->size == 0)
16534*b7893ccfSSadaf Ebrahimi {
16535*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
16536*b7893ccfSSadaf Ebrahimi }
16537*b7893ccfSSadaf Ebrahimi
16538*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCreateBuffer");
16539*b7893ccfSSadaf Ebrahimi
16540*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16541*b7893ccfSSadaf Ebrahimi
16542*b7893ccfSSadaf Ebrahimi *pBuffer = VK_NULL_HANDLE;
16543*b7893ccfSSadaf Ebrahimi *pAllocation = VK_NULL_HANDLE;
16544*b7893ccfSSadaf Ebrahimi
16545*b7893ccfSSadaf Ebrahimi // 1. Create VkBuffer.
16546*b7893ccfSSadaf Ebrahimi VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16547*b7893ccfSSadaf Ebrahimi allocator->m_hDevice,
16548*b7893ccfSSadaf Ebrahimi pBufferCreateInfo,
16549*b7893ccfSSadaf Ebrahimi allocator->GetAllocationCallbacks(),
16550*b7893ccfSSadaf Ebrahimi pBuffer);
16551*b7893ccfSSadaf Ebrahimi if(res >= 0)
16552*b7893ccfSSadaf Ebrahimi {
16553*b7893ccfSSadaf Ebrahimi // 2. vkGetBufferMemoryRequirements.
16554*b7893ccfSSadaf Ebrahimi VkMemoryRequirements vkMemReq = {};
16555*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation = false;
16556*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation = false;
16557*b7893ccfSSadaf Ebrahimi allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16558*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation, prefersDedicatedAllocation);
16559*b7893ccfSSadaf Ebrahimi
16560*b7893ccfSSadaf Ebrahimi // Make sure alignment requirements for specific buffer usages reported
16561*b7893ccfSSadaf Ebrahimi // in Physical Device Properties are included in alignment reported by memory requirements.
16562*b7893ccfSSadaf Ebrahimi if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16563*b7893ccfSSadaf Ebrahimi {
16564*b7893ccfSSadaf Ebrahimi VMA_ASSERT(vkMemReq.alignment %
16565*b7893ccfSSadaf Ebrahimi allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16566*b7893ccfSSadaf Ebrahimi }
16567*b7893ccfSSadaf Ebrahimi if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16568*b7893ccfSSadaf Ebrahimi {
16569*b7893ccfSSadaf Ebrahimi VMA_ASSERT(vkMemReq.alignment %
16570*b7893ccfSSadaf Ebrahimi allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16571*b7893ccfSSadaf Ebrahimi }
16572*b7893ccfSSadaf Ebrahimi if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16573*b7893ccfSSadaf Ebrahimi {
16574*b7893ccfSSadaf Ebrahimi VMA_ASSERT(vkMemReq.alignment %
16575*b7893ccfSSadaf Ebrahimi allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16576*b7893ccfSSadaf Ebrahimi }
16577*b7893ccfSSadaf Ebrahimi
16578*b7893ccfSSadaf Ebrahimi // 3. Allocate memory using allocator.
16579*b7893ccfSSadaf Ebrahimi res = allocator->AllocateMemory(
16580*b7893ccfSSadaf Ebrahimi vkMemReq,
16581*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16582*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16583*b7893ccfSSadaf Ebrahimi *pBuffer, // dedicatedBuffer
16584*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedImage
16585*b7893ccfSSadaf Ebrahimi *pAllocationCreateInfo,
16586*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_BUFFER,
16587*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16588*b7893ccfSSadaf Ebrahimi pAllocation);
16589*b7893ccfSSadaf Ebrahimi
16590*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16591*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16592*b7893ccfSSadaf Ebrahimi {
16593*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordCreateBuffer(
16594*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16595*b7893ccfSSadaf Ebrahimi *pBufferCreateInfo,
16596*b7893ccfSSadaf Ebrahimi *pAllocationCreateInfo,
16597*b7893ccfSSadaf Ebrahimi *pAllocation);
16598*b7893ccfSSadaf Ebrahimi }
16599*b7893ccfSSadaf Ebrahimi #endif
16600*b7893ccfSSadaf Ebrahimi
16601*b7893ccfSSadaf Ebrahimi if(res >= 0)
16602*b7893ccfSSadaf Ebrahimi {
16603*b7893ccfSSadaf Ebrahimi // 4. Bind buffer with memory.
16604*b7893ccfSSadaf Ebrahimi res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16605*b7893ccfSSadaf Ebrahimi if(res >= 0)
16606*b7893ccfSSadaf Ebrahimi {
16607*b7893ccfSSadaf Ebrahimi // All steps succeeded.
16608*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
16609*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16610*b7893ccfSSadaf Ebrahimi #endif
16611*b7893ccfSSadaf Ebrahimi if(pAllocationInfo != VMA_NULL)
16612*b7893ccfSSadaf Ebrahimi {
16613*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16614*b7893ccfSSadaf Ebrahimi }
16615*b7893ccfSSadaf Ebrahimi
16616*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
16617*b7893ccfSSadaf Ebrahimi }
16618*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(
16619*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16620*b7893ccfSSadaf Ebrahimi pAllocation);
16621*b7893ccfSSadaf Ebrahimi *pAllocation = VK_NULL_HANDLE;
16622*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16623*b7893ccfSSadaf Ebrahimi *pBuffer = VK_NULL_HANDLE;
16624*b7893ccfSSadaf Ebrahimi return res;
16625*b7893ccfSSadaf Ebrahimi }
16626*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16627*b7893ccfSSadaf Ebrahimi *pBuffer = VK_NULL_HANDLE;
16628*b7893ccfSSadaf Ebrahimi return res;
16629*b7893ccfSSadaf Ebrahimi }
16630*b7893ccfSSadaf Ebrahimi return res;
16631*b7893ccfSSadaf Ebrahimi }
16632*b7893ccfSSadaf Ebrahimi
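// Illustrative caller-side sketch (assumed: `allocator`): the common path this function
// implements - creating a VkBuffer together with its memory in a single call, and later
// destroying both with vmaDestroyBuffer(). Not compiled; example only.
#if 0
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL);
if(res == VK_SUCCESS)
{
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
}
#endif
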
16633*b7893ccfSSadaf Ebrahimi void vmaDestroyBuffer(
16634*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16635*b7893ccfSSadaf Ebrahimi VkBuffer buffer,
16636*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
16637*b7893ccfSSadaf Ebrahimi {
16638*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16639*b7893ccfSSadaf Ebrahimi
16640*b7893ccfSSadaf Ebrahimi if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16641*b7893ccfSSadaf Ebrahimi {
16642*b7893ccfSSadaf Ebrahimi return;
16643*b7893ccfSSadaf Ebrahimi }
16644*b7893ccfSSadaf Ebrahimi
16645*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDestroyBuffer");
16646*b7893ccfSSadaf Ebrahimi
16647*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16648*b7893ccfSSadaf Ebrahimi
16649*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16650*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16651*b7893ccfSSadaf Ebrahimi {
16652*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordDestroyBuffer(
16653*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16654*b7893ccfSSadaf Ebrahimi allocation);
16655*b7893ccfSSadaf Ebrahimi }
16656*b7893ccfSSadaf Ebrahimi #endif
16657*b7893ccfSSadaf Ebrahimi
16658*b7893ccfSSadaf Ebrahimi if(buffer != VK_NULL_HANDLE)
16659*b7893ccfSSadaf Ebrahimi {
16660*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16661*b7893ccfSSadaf Ebrahimi }
16662*b7893ccfSSadaf Ebrahimi
16663*b7893ccfSSadaf Ebrahimi if(allocation != VK_NULL_HANDLE)
16664*b7893ccfSSadaf Ebrahimi {
16665*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(
16666*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16667*b7893ccfSSadaf Ebrahimi &allocation);
16668*b7893ccfSSadaf Ebrahimi }
16669*b7893ccfSSadaf Ebrahimi }
16670*b7893ccfSSadaf Ebrahimi
16671*b7893ccfSSadaf Ebrahimi VkResult vmaCreateImage(
16672*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16673*b7893ccfSSadaf Ebrahimi const VkImageCreateInfo* pImageCreateInfo,
16674*b7893ccfSSadaf Ebrahimi const VmaAllocationCreateInfo* pAllocationCreateInfo,
16675*b7893ccfSSadaf Ebrahimi VkImage* pImage,
16676*b7893ccfSSadaf Ebrahimi VmaAllocation* pAllocation,
16677*b7893ccfSSadaf Ebrahimi VmaAllocationInfo* pAllocationInfo)
16678*b7893ccfSSadaf Ebrahimi {
16679*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16680*b7893ccfSSadaf Ebrahimi
16681*b7893ccfSSadaf Ebrahimi if(pImageCreateInfo->extent.width == 0 ||
16682*b7893ccfSSadaf Ebrahimi pImageCreateInfo->extent.height == 0 ||
16683*b7893ccfSSadaf Ebrahimi pImageCreateInfo->extent.depth == 0 ||
16684*b7893ccfSSadaf Ebrahimi pImageCreateInfo->mipLevels == 0 ||
16685*b7893ccfSSadaf Ebrahimi pImageCreateInfo->arrayLayers == 0)
16686*b7893ccfSSadaf Ebrahimi {
16687*b7893ccfSSadaf Ebrahimi return VK_ERROR_VALIDATION_FAILED_EXT;
16688*b7893ccfSSadaf Ebrahimi }
16689*b7893ccfSSadaf Ebrahimi
16690*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaCreateImage");
16691*b7893ccfSSadaf Ebrahimi
16692*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16693*b7893ccfSSadaf Ebrahimi
16694*b7893ccfSSadaf Ebrahimi *pImage = VK_NULL_HANDLE;
16695*b7893ccfSSadaf Ebrahimi *pAllocation = VK_NULL_HANDLE;
16696*b7893ccfSSadaf Ebrahimi
16697*b7893ccfSSadaf Ebrahimi // 1. Create VkImage.
16698*b7893ccfSSadaf Ebrahimi VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16699*b7893ccfSSadaf Ebrahimi allocator->m_hDevice,
16700*b7893ccfSSadaf Ebrahimi pImageCreateInfo,
16701*b7893ccfSSadaf Ebrahimi allocator->GetAllocationCallbacks(),
16702*b7893ccfSSadaf Ebrahimi pImage);
16703*b7893ccfSSadaf Ebrahimi if(res >= 0)
16704*b7893ccfSSadaf Ebrahimi {
16705*b7893ccfSSadaf Ebrahimi VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16706*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16707*b7893ccfSSadaf Ebrahimi VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16708*b7893ccfSSadaf Ebrahimi
16709*b7893ccfSSadaf Ebrahimi // 2. Allocate memory using allocator.
16710*b7893ccfSSadaf Ebrahimi VkMemoryRequirements vkMemReq = {};
16711*b7893ccfSSadaf Ebrahimi bool requiresDedicatedAllocation = false;
16712*b7893ccfSSadaf Ebrahimi bool prefersDedicatedAllocation = false;
16713*b7893ccfSSadaf Ebrahimi allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16714*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation, prefersDedicatedAllocation);
16715*b7893ccfSSadaf Ebrahimi
16716*b7893ccfSSadaf Ebrahimi res = allocator->AllocateMemory(
16717*b7893ccfSSadaf Ebrahimi vkMemReq,
16718*b7893ccfSSadaf Ebrahimi requiresDedicatedAllocation,
16719*b7893ccfSSadaf Ebrahimi prefersDedicatedAllocation,
16720*b7893ccfSSadaf Ebrahimi VK_NULL_HANDLE, // dedicatedBuffer
16721*b7893ccfSSadaf Ebrahimi *pImage, // dedicatedImage
16722*b7893ccfSSadaf Ebrahimi *pAllocationCreateInfo,
16723*b7893ccfSSadaf Ebrahimi suballocType,
16724*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16725*b7893ccfSSadaf Ebrahimi pAllocation);
16726*b7893ccfSSadaf Ebrahimi
16727*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16728*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16729*b7893ccfSSadaf Ebrahimi {
16730*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordCreateImage(
16731*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16732*b7893ccfSSadaf Ebrahimi *pImageCreateInfo,
16733*b7893ccfSSadaf Ebrahimi *pAllocationCreateInfo,
16734*b7893ccfSSadaf Ebrahimi *pAllocation);
16735*b7893ccfSSadaf Ebrahimi }
16736*b7893ccfSSadaf Ebrahimi #endif
16737*b7893ccfSSadaf Ebrahimi
16738*b7893ccfSSadaf Ebrahimi if(res >= 0)
16739*b7893ccfSSadaf Ebrahimi {
16740*b7893ccfSSadaf Ebrahimi // 3. Bind image with memory.
16741*b7893ccfSSadaf Ebrahimi res = allocator->BindImageMemory(*pAllocation, *pImage);
16742*b7893ccfSSadaf Ebrahimi if(res >= 0)
16743*b7893ccfSSadaf Ebrahimi {
16744*b7893ccfSSadaf Ebrahimi // All steps succeeded.
16745*b7893ccfSSadaf Ebrahimi #if VMA_STATS_STRING_ENABLED
16746*b7893ccfSSadaf Ebrahimi (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16747*b7893ccfSSadaf Ebrahimi #endif
16748*b7893ccfSSadaf Ebrahimi if(pAllocationInfo != VMA_NULL)
16749*b7893ccfSSadaf Ebrahimi {
16750*b7893ccfSSadaf Ebrahimi allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16751*b7893ccfSSadaf Ebrahimi }
16752*b7893ccfSSadaf Ebrahimi
16753*b7893ccfSSadaf Ebrahimi return VK_SUCCESS;
16754*b7893ccfSSadaf Ebrahimi }
16755*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(
16756*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16757*b7893ccfSSadaf Ebrahimi pAllocation);
16758*b7893ccfSSadaf Ebrahimi *pAllocation = VK_NULL_HANDLE;
16759*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16760*b7893ccfSSadaf Ebrahimi *pImage = VK_NULL_HANDLE;
16761*b7893ccfSSadaf Ebrahimi return res;
16762*b7893ccfSSadaf Ebrahimi }
16763*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16764*b7893ccfSSadaf Ebrahimi *pImage = VK_NULL_HANDLE;
16765*b7893ccfSSadaf Ebrahimi return res;
16766*b7893ccfSSadaf Ebrahimi }
16767*b7893ccfSSadaf Ebrahimi return res;
16768*b7893ccfSSadaf Ebrahimi }
16769*b7893ccfSSadaf Ebrahimi
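// Illustrative caller-side sketch (assumed: `allocator`): creating a 2D image with
// device-local memory in a single vmaCreateImage() call, mirroring the buffer example
// above, then destroying it with vmaDestroyImage(). Not compiled; example only.
#if 0
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, NULL);
if(res == VK_SUCCESS)
{
    // ... use the image ...
    vmaDestroyImage(allocator, image, allocation);
}
#endif
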
16770*b7893ccfSSadaf Ebrahimi void vmaDestroyImage(
16771*b7893ccfSSadaf Ebrahimi VmaAllocator allocator,
16772*b7893ccfSSadaf Ebrahimi VkImage image,
16773*b7893ccfSSadaf Ebrahimi VmaAllocation allocation)
16774*b7893ccfSSadaf Ebrahimi {
16775*b7893ccfSSadaf Ebrahimi VMA_ASSERT(allocator);
16776*b7893ccfSSadaf Ebrahimi
16777*b7893ccfSSadaf Ebrahimi if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16778*b7893ccfSSadaf Ebrahimi {
16779*b7893ccfSSadaf Ebrahimi return;
16780*b7893ccfSSadaf Ebrahimi }
16781*b7893ccfSSadaf Ebrahimi
16782*b7893ccfSSadaf Ebrahimi VMA_DEBUG_LOG("vmaDestroyImage");
16783*b7893ccfSSadaf Ebrahimi
16784*b7893ccfSSadaf Ebrahimi VMA_DEBUG_GLOBAL_MUTEX_LOCK
16785*b7893ccfSSadaf Ebrahimi
16786*b7893ccfSSadaf Ebrahimi #if VMA_RECORDING_ENABLED
16787*b7893ccfSSadaf Ebrahimi if(allocator->GetRecorder() != VMA_NULL)
16788*b7893ccfSSadaf Ebrahimi {
16789*b7893ccfSSadaf Ebrahimi allocator->GetRecorder()->RecordDestroyImage(
16790*b7893ccfSSadaf Ebrahimi allocator->GetCurrentFrameIndex(),
16791*b7893ccfSSadaf Ebrahimi allocation);
16792*b7893ccfSSadaf Ebrahimi }
16793*b7893ccfSSadaf Ebrahimi #endif
16794*b7893ccfSSadaf Ebrahimi
16795*b7893ccfSSadaf Ebrahimi if(image != VK_NULL_HANDLE)
16796*b7893ccfSSadaf Ebrahimi {
16797*b7893ccfSSadaf Ebrahimi (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16798*b7893ccfSSadaf Ebrahimi }
16799*b7893ccfSSadaf Ebrahimi if(allocation != VK_NULL_HANDLE)
16800*b7893ccfSSadaf Ebrahimi {
16801*b7893ccfSSadaf Ebrahimi allocator->FreeMemory(
16802*b7893ccfSSadaf Ebrahimi 1, // allocationCount
16803*b7893ccfSSadaf Ebrahimi &allocation);
16804*b7893ccfSSadaf Ebrahimi }
16805*b7893ccfSSadaf Ebrahimi }
16806*b7893ccfSSadaf Ebrahimi #if defined(__GNUC__)
16807*b7893ccfSSadaf Ebrahimi #pragma GCC diagnostic pop
16808*b7893ccfSSadaf Ebrahimi #if defined(__clang__)
16809*b7893ccfSSadaf Ebrahimi #pragma clang diagnostic pop
16810*b7893ccfSSadaf Ebrahimi #endif
16811*b7893ccfSSadaf Ebrahimi #endif
16812*b7893ccfSSadaf Ebrahimi #endif // #ifdef VMA_IMPLEMENTATION
16813*b7893ccfSSadaf Ebrahimi // clang-format on
16814