/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vkr_device.h"

#include "venus-protocol/vn_protocol_renderer_device.h"

#include "vkr_command_buffer.h"
#include "vkr_context.h"
#include "vkr_descriptor_set.h"
#include "vkr_device_memory.h"
#include "vkr_physical_device.h"
#include "vkr_queue.h"

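/* Retrieves a VkQueue for every queue requested in the create infos, wraps
 * each in a vkr_queue, and collects them on dev->queues.  On failure, all
 * queues created so far are destroyed before returning.
 */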
static VkResult
vkr_device_create_queues(struct vkr_context *ctx,
                         struct vkr_device *dev,
                         uint32_t create_info_count,
                         const VkDeviceQueueCreateInfo *create_infos)
{
   struct vn_device_proc_table *vk = &dev->proc_table;
   list_inithead(&dev->queues);

   for (uint32_t i = 0; i < create_info_count; i++) {
      for (uint32_t j = 0; j < create_infos[i].queueCount; j++) {
         const VkDeviceQueueInfo2 info = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
            .pNext = NULL,
            .flags = create_infos[i].flags,
            .queueFamilyIndex = create_infos[i].queueFamilyIndex,
            .queueIndex = j,
         };
         VkQueue handle = VK_NULL_HANDLE;
         /* The spec used to forbid calling vkGetDeviceQueue2 with flags set
          * to zero; that was fixed in spec version 1.1.130.  Work around
          * drivers that still implement the buggy behavior.
          */
         if (info.flags) {
            vk->GetDeviceQueue2(dev->base.handle.device, &info, &handle);
         } else {
            vk->GetDeviceQueue(dev->base.handle.device, info.queueFamilyIndex,
                               info.queueIndex, &handle);
         }

         struct vkr_queue *queue = vkr_queue_create(
            ctx, dev, info.flags, info.queueFamilyIndex, info.queueIndex, handle);
         if (!queue) {
            struct vkr_queue *entry, *tmp;
            LIST_FOR_EACH_ENTRY_SAFE (entry, tmp, &dev->queues, base.track_head)
               vkr_queue_destroy(ctx, entry);

            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }

         /* queues are not tracked as device objects */
         list_add(&queue->base.track_head, &dev->queues);
      }
   }

   return VK_SUCCESS;
}

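/* Builds the per-device proc table from the device's api version and enabled
 * extensions, so later dispatch can call driver entry points directly.
 */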
static void
vkr_device_init_proc_table(struct vkr_device *dev,
                           uint32_t api_version,
                           const char *const *exts,
                           uint32_t count)
{
   struct vn_info_extension_table ext_table;
   vkr_extension_table_init(&ext_table, exts, count);

   vn_util_init_device_proc_table(dev->base.handle.device, api_version, &ext_table,
                                  &dev->proc_table);
}

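/* Creates the host VkDevice.  Beyond what the guest requested, the renderer
 * transparently enables the external memory and external fence extensions it
 * needs for resource and fence export.
 */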
static void
vkr_dispatch_vkCreateDevice(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkCreateDevice *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_physical_device *physical_dev =
      vkr_physical_device_from_handle(args->physicalDevice);

   /* append extensions for our own use */
   const char **exts = NULL;
   uint32_t ext_count = args->pCreateInfo->enabledExtensionCount;
   ext_count += physical_dev->KHR_external_memory_fd;
   ext_count += physical_dev->EXT_external_memory_dma_buf;
   ext_count += physical_dev->KHR_external_fence_fd;
   if (ext_count > args->pCreateInfo->enabledExtensionCount) {
      exts = malloc(sizeof(*exts) * ext_count);
      if (!exts) {
         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      for (uint32_t i = 0; i < args->pCreateInfo->enabledExtensionCount; i++)
         exts[i] = args->pCreateInfo->ppEnabledExtensionNames[i];

      ext_count = args->pCreateInfo->enabledExtensionCount;
      if (physical_dev->KHR_external_memory_fd)
         exts[ext_count++] = "VK_KHR_external_memory_fd";
      if (physical_dev->EXT_external_memory_dma_buf)
         exts[ext_count++] = "VK_EXT_external_memory_dma_buf";
      if (physical_dev->KHR_external_fence_fd)
         exts[ext_count++] = "VK_KHR_external_fence_fd";

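      /* pCreateInfo is const in the decoded command; cast the const away to
       * point the driver at the extended extension list
       */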
      ((VkDeviceCreateInfo *)args->pCreateInfo)->ppEnabledExtensionNames = exts;
      ((VkDeviceCreateInfo *)args->pCreateInfo)->enabledExtensionCount = ext_count;
   }

   struct vkr_device *dev =
      vkr_context_alloc_object(ctx, sizeof(*dev), VK_OBJECT_TYPE_DEVICE, args->pDevice);
   if (!dev) {
      args->ret = VK_ERROR_OUT_OF_HOST_MEMORY;
      free(exts);
      return;
   }

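   /* swap the guest-visible handles in args for the real driver handles
    * before calling into the driver
    */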
   vn_replace_vkCreateDevice_args_handle(args);
   args->ret = vkCreateDevice(args->physicalDevice, args->pCreateInfo, NULL,
                              &dev->base.handle.device);
   if (args->ret != VK_SUCCESS) {
      free(exts);
      free(dev);
      return;
   }

   dev->physical_device = physical_dev;

   vkr_device_init_proc_table(dev, physical_dev->api_version,
                              args->pCreateInfo->ppEnabledExtensionNames,
                              args->pCreateInfo->enabledExtensionCount);

   free(exts);

   args->ret = vkr_device_create_queues(ctx, dev, args->pCreateInfo->queueCreateInfoCount,
                                        args->pCreateInfo->pQueueCreateInfos);
   if (args->ret != VK_SUCCESS) {
      vkDestroyDevice(dev->base.handle.device, NULL);
      free(dev);
      return;
   }

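   /* free_syncs caches VkFences for reuse across queue submissions; the
    * mutex guards the list since fences may be recycled from queue sync
    * threads
    */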
   mtx_init(&dev->free_sync_mutex, mtx_plain);
   list_inithead(&dev->free_syncs);

   list_inithead(&dev->objects);

   list_add(&dev->base.track_head, &physical_dev->devices);

   vkr_context_add_object(ctx, &dev->base);
}

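/* Destroys a device object that the guest leaked, on behalf of
 * vkr_device_destroy.  Pool objects also release their pooled children,
 * which the driver frees implicitly with the pool.
 */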
static void
vkr_device_object_destroy(struct vkr_context *ctx,
                          struct vkr_device *dev,
                          struct vkr_object *obj)
{
   struct vn_device_proc_table *vk = &dev->proc_table;
   VkDevice device = dev->base.handle.device;

   assert(vkr_device_should_track_object(obj));

   switch (obj->type) {
   case VK_OBJECT_TYPE_SEMAPHORE:
      vk->DestroySemaphore(device, obj->handle.semaphore, NULL);
      break;
   case VK_OBJECT_TYPE_FENCE:
      vk->DestroyFence(device, obj->handle.fence, NULL);
      break;
   case VK_OBJECT_TYPE_DEVICE_MEMORY:
      vk->FreeMemory(device, obj->handle.device_memory, NULL);
      vkr_device_memory_release((struct vkr_device_memory *)obj);
      break;
   case VK_OBJECT_TYPE_BUFFER:
      vk->DestroyBuffer(device, obj->handle.buffer, NULL);
      break;
   case VK_OBJECT_TYPE_IMAGE:
      vk->DestroyImage(device, obj->handle.image, NULL);
      break;
   case VK_OBJECT_TYPE_EVENT:
      vk->DestroyEvent(device, obj->handle.event, NULL);
      break;
   case VK_OBJECT_TYPE_QUERY_POOL:
      vk->DestroyQueryPool(device, obj->handle.query_pool, NULL);
      break;
   case VK_OBJECT_TYPE_BUFFER_VIEW:
      vk->DestroyBufferView(device, obj->handle.buffer_view, NULL);
      break;
   case VK_OBJECT_TYPE_IMAGE_VIEW:
      vk->DestroyImageView(device, obj->handle.image_view, NULL);
      break;
   case VK_OBJECT_TYPE_SHADER_MODULE:
      vk->DestroyShaderModule(device, obj->handle.shader_module, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE_CACHE:
      vk->DestroyPipelineCache(device, obj->handle.pipeline_cache, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
      vk->DestroyPipelineLayout(device, obj->handle.pipeline_layout, NULL);
      break;
   case VK_OBJECT_TYPE_RENDER_PASS:
      vk->DestroyRenderPass(device, obj->handle.render_pass, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE:
      vk->DestroyPipeline(device, obj->handle.pipeline, NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
      vk->DestroyDescriptorSetLayout(device, obj->handle.descriptor_set_layout, NULL);
      break;
   case VK_OBJECT_TYPE_SAMPLER:
      vk->DestroySampler(device, obj->handle.sampler, NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_POOL: {
      /* Destroying a VkDescriptorPool implicitly frees every VkDescriptorSet
       * allocated from it.
       */
      vk->DestroyDescriptorPool(device, obj->handle.descriptor_pool, NULL);
      vkr_descriptor_pool_release(ctx, (struct vkr_descriptor_pool *)obj);
      break;
   }
   case VK_OBJECT_TYPE_FRAMEBUFFER:
      vk->DestroyFramebuffer(device, obj->handle.framebuffer, NULL);
      break;
   case VK_OBJECT_TYPE_COMMAND_POOL: {
      /* Destroying a VkCommandPool implicitly frees every VkCommandBuffer
       * allocated from it.
       */
      vk->DestroyCommandPool(device, obj->handle.command_pool, NULL);
      vkr_command_pool_release(ctx, (struct vkr_command_pool *)obj);
      break;
   }
   case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
      vk->DestroySamplerYcbcrConversion(device, obj->handle.sampler_ycbcr_conversion,
                                        NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
      vk->DestroyDescriptorUpdateTemplate(device, obj->handle.descriptor_update_template,
                                          NULL);
      break;
   default:
      vkr_log("Unhandled vkr_object(%p) with VkObjectType(%u)", obj, (uint32_t)obj->type);
      assert(false);
      break;
   }

   vkr_device_remove_object(ctx, dev, obj);
}

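/* Tears down the device: waits for it to go idle, destroys any leaked
 * objects and the queues, frees the cached fences, and finally destroys the
 * VkDevice itself.
 */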
void
vkr_device_destroy(struct vkr_context *ctx, struct vkr_device *dev)
{
   struct vn_device_proc_table *vk = &dev->proc_table;
   VkDevice device = dev->base.handle.device;

   if (!LIST_IS_EMPTY(&dev->objects))
      vkr_log("destroying device with valid objects");

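   /* wait for the device to go idle so the leaked objects destroyed below
    * are no longer in use
    */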
   VkResult result = vk->DeviceWaitIdle(device);
   if (result != VK_SUCCESS)
      vkr_log("vkDeviceWaitIdle(%p) failed(%d)", dev, (int32_t)result);

   if (!LIST_IS_EMPTY(&dev->objects)) {
      struct vkr_object *obj, *obj_tmp;
      LIST_FOR_EACH_ENTRY_SAFE (obj, obj_tmp, &dev->objects, track_head)
         vkr_device_object_destroy(ctx, dev, obj);
   }

   struct vkr_queue *queue, *queue_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (queue, queue_tmp, &dev->queues, base.track_head)
      vkr_queue_destroy(ctx, queue);

   struct vkr_queue_sync *sync, *sync_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &dev->free_syncs, head) {
      vk->DestroyFence(dev->base.handle.device, sync->fence, NULL);
      free(sync);
   }

   mtx_destroy(&dev->free_sync_mutex);

   vk->DestroyDevice(device, NULL);

   list_del(&dev->base.track_head);

   vkr_context_remove_object(ctx, &dev->base);
}

static void
vkr_dispatch_vkDestroyDevice(struct vn_dispatch_context *dispatch,
                             struct vn_command_vkDestroyDevice *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_device *dev = vkr_device_from_handle(args->device);
   /* this never happens */
   if (!dev)
      return;

   vkr_device_destroy(ctx, dev);
}

static void
vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures(
   UNUSED struct vn_dispatch_context *dispatch,
   struct vn_command_vkGetDeviceGroupPeerMemoryFeatures *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkGetDeviceGroupPeerMemoryFeatures_args_handle(args);
   vk->GetDeviceGroupPeerMemoryFeatures(args->device, args->heapIndex,
                                        args->localDeviceIndex, args->remoteDeviceIndex,
                                        args->pPeerMemoryFeatures);
}

static void
vkr_dispatch_vkDeviceWaitIdle(struct vn_dispatch_context *dispatch,
                              UNUSED struct vn_command_vkDeviceWaitIdle *args)
{
   struct vkr_context *ctx = dispatch->data;
   /* the renderer must not block; treat a guest vkDeviceWaitIdle as a fatal
    * decoder error
    */
   vkr_cs_decoder_set_fatal(&ctx->decoder);
}

static void
vkr_dispatch_vkGetCalibratedTimestampsEXT(
   UNUSED struct vn_dispatch_context *ctx,
   struct vn_command_vkGetCalibratedTimestampsEXT *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkGetCalibratedTimestampsEXT_args_handle(args);
   args->ret = vk->GetCalibratedTimestampsEXT(args->device, args->timestampCount,
                                              args->pTimestampInfos, args->pTimestamps,
                                              args->pMaxDeviation);
}

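/* Wires the device-level commands into the context's dispatch table.
 * vkGetDeviceProcAddr is left NULL; it is presumably resolved on the guest
 * side and never reaches the renderer.
 */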
void
vkr_context_init_device_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateDevice = vkr_dispatch_vkCreateDevice;
   dispatch->dispatch_vkDestroyDevice = vkr_dispatch_vkDestroyDevice;
   dispatch->dispatch_vkGetDeviceProcAddr = NULL;
   dispatch->dispatch_vkGetDeviceGroupPeerMemoryFeatures =
      vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures;
   dispatch->dispatch_vkDeviceWaitIdle = vkr_dispatch_vkDeviceWaitIdle;
   dispatch->dispatch_vkGetCalibratedTimestampsEXT =
      vkr_dispatch_vkGetCalibratedTimestampsEXT;
}