/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_device.h"

#include "vk_common_entrypoints.h"
#include "vk_instance.h"
#include "vk_log.h"
#include "vk_physical_device.h"
#include "vk_queue.h"
#include "vk_sync.h"
#include "vk_sync_timeline.h"
#include "vk_util.h"
#include "util/u_debug.h"
#include "util/hash_table.h"
#include "util/perf/cpu_trace.h"
#include "util/ralloc.h"

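/* Choose the timeline semaphore mode for a device based on the sync types
 * exposed by the physical device and the features they advertise.
 */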
static enum vk_device_timeline_mode
get_timeline_mode(struct vk_physical_device *physical_device)
{
   if (physical_device->supported_sync_types == NULL)
      return VK_DEVICE_TIMELINE_MODE_NONE;

   const struct vk_sync_type *timeline_type = NULL;
   for (const struct vk_sync_type *const *t =
        physical_device->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_TIMELINE) {
         /* We can only have one timeline mode */
         assert(timeline_type == NULL);
         timeline_type = *t;
      }
   }

   if (timeline_type == NULL)
      return VK_DEVICE_TIMELINE_MODE_NONE;

   if (vk_sync_type_is_vk_sync_timeline(timeline_type))
      return VK_DEVICE_TIMELINE_MODE_EMULATED;

   if (timeline_type->features & VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL)
      return VK_DEVICE_TIMELINE_MODE_NATIVE;

   /* For assisted mode, we require a few additional things of all sync types
    * which may be used as semaphores.
    */
   for (const struct vk_sync_type *const *t =
        physical_device->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT) {
         assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
         if ((*t)->features & VK_SYNC_FEATURE_BINARY)
            assert((*t)->features & VK_SYNC_FEATURE_CPU_RESET);
      }
   }

   return VK_DEVICE_TIMELINE_MODE_ASSISTED;
}

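/* Gather the features enabled at device-create time from both the legacy
 * pEnabledFeatures pointer and any feature structs chained in pNext.
 */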
static void
collect_enabled_features(struct vk_device *device,
                         const VkDeviceCreateInfo *pCreateInfo)
{
   if (pCreateInfo->pEnabledFeatures)
      vk_set_physical_device_features_1_0(&device->enabled_features, pCreateInfo->pEnabledFeatures);
   vk_set_physical_device_features(&device->enabled_features, pCreateInfo->pNext);
}

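/* Initialize the common vk_device embedded in a driver's device object:
 * validates the requested extensions and features, records what was
 * enabled, merges the common entrypoints into the dispatch table, and
 * selects the timeline and queue-submit modes.
 */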
VkResult
vk_device_init(struct vk_device *device,
               struct vk_physical_device *physical_device,
               const struct vk_device_dispatch_table *dispatch_table,
               const VkDeviceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *alloc)
{
   memset(device, 0, sizeof(*device));
   vk_object_base_init(device, &device->base, VK_OBJECT_TYPE_DEVICE);
   if (alloc != NULL)
      device->alloc = *alloc;
   else
      device->alloc = physical_device->instance->alloc;

   device->physical = physical_device;

   if (dispatch_table) {
      device->dispatch_table = *dispatch_table;

      /* Add common entrypoints without overwriting driver-provided ones. */
      vk_device_dispatch_table_from_entrypoints(
         &device->dispatch_table, &vk_common_device_entrypoints, false);
   }

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < VK_DEVICE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    vk_device_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= VK_DEVICE_EXTENSION_COUNT)
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);

      if (!physical_device->supported_extensions.extensions[idx])
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);

#ifdef ANDROID_STRICT
      if (!vk_android_allowed_device_extensions.extensions[idx])
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);
#endif

      device->enabled_extensions.extensions[idx] = true;
   }

   VkResult result =
      vk_physical_device_check_device_features(physical_device,
                                               pCreateInfo);
   if (result != VK_SUCCESS)
      return result;

   collect_enabled_features(device, pCreateInfo);

   p_atomic_set(&device->private_data_next_index, 0);

   list_inithead(&device->queues);

   device->drm_fd = -1;
   device->mem_cache = NULL;

   device->timeline_mode = get_timeline_mode(physical_device);

   switch (device->timeline_mode) {
   case VK_DEVICE_TIMELINE_MODE_NONE:
   case VK_DEVICE_TIMELINE_MODE_NATIVE:
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
      break;

   case VK_DEVICE_TIMELINE_MODE_EMULATED:
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_DEFERRED;
      break;

   case VK_DEVICE_TIMELINE_MODE_ASSISTED:
      if (os_get_option("MESA_VK_ENABLE_SUBMIT_THREAD")) {
         if (debug_get_bool_option("MESA_VK_ENABLE_SUBMIT_THREAD", false)) {
            device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED;
         } else {
            device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
         }
      } else {
         device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
      }
      break;

   default:
      unreachable("Invalid timeline mode");
   }

#if DETECT_OS_ANDROID
   mtx_init(&device->swapchain_private_mtx, mtx_plain);
   device->swapchain_private = NULL;
#endif /* DETECT_OS_ANDROID */

   simple_mtx_init(&device->trace_mtx, mtx_plain);

   vk_foreach_struct_const (ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_DEVICE_PIPELINE_BINARY_INTERNAL_CACHE_CONTROL_KHR: {
         const VkDevicePipelineBinaryInternalCacheControlKHR *cache_control = (const void *)ext;
         if (cache_control->disableInternalCache)
            device->disable_internal_cache = true;
         break;
      }
      default:
         break;
      }
   }

   return VK_SUCCESS;
}

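/* Tear down the state set up by vk_device_init().  Queues must already
 * have been destroyed by the driver.
 */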
void
vk_device_finish(struct vk_device *device)
{
   /* Drivers should tear down their own queues */
   assert(list_is_empty(&device->queues));

   vk_memory_trace_finish(device);

#if DETECT_OS_ANDROID
   if (device->swapchain_private) {
      hash_table_foreach(device->swapchain_private, entry)
         util_sparse_array_finish(entry->data);
      ralloc_free(device->swapchain_private);
   }
#endif /* DETECT_OS_ANDROID */

   simple_mtx_destroy(&device->trace_mtx);

   vk_object_base_finish(&device->base);
}

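/* Opt a device in to threaded queue submission.  Must be called before any
 * queues are created.
 */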
void
vk_device_enable_threaded_submit(struct vk_device *device)
{
   /* This must be called before any queues are created */
   assert(list_is_empty(&device->queues));

   /* In order to use threaded submit, we need every sync type that can be
    * used as a wait fence for vkQueueSubmit() to support WAIT_PENDING.
    * It's required for cross-thread/process submit re-ordering.
    */
   for (const struct vk_sync_type *const *t =
        device->physical->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT)
         assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
   }

   /* Any binary vk_sync types which will be used as permanent semaphore
    * payloads also need to support vk_sync_type::move, but that's a lot
    * harder to assert since it only applies to permanent semaphore payloads.
    */

   if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_THREADED)
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
}

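/* Flush deferred submissions on every queue.  This is a no-op unless the
 * device uses VK_QUEUE_SUBMIT_MODE_DEFERRED; otherwise it keeps looping
 * over the queues until none of them has work left to submit.
 */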
VkResult
vk_device_flush(struct vk_device *device)
{
   if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_DEFERRED)
      return VK_SUCCESS;

   bool progress;
   do {
      progress = false;

      vk_foreach_queue(queue, device) {
         uint32_t queue_submit_count;
         VkResult result = vk_queue_flush(queue, &queue_submit_count);
         if (unlikely(result != VK_SUCCESS))
            return result;

         if (queue_submit_count)
            progress = true;
      }
   } while (progress);

   return VK_SUCCESS;
}

static const char *
timeline_mode_str(struct vk_device *device)
{
   switch (device->timeline_mode) {
#define CASE(X) case VK_DEVICE_TIMELINE_MODE_##X: return #X;
   CASE(NONE)
   CASE(EMULATED)
   CASE(ASSISTED)
   CASE(NATIVE)
#undef CASE
   default: return "UNKNOWN";
   }
}

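/* Report an already-flagged device loss: logs the error recorded by any
 * lost queue along with the device's timeline mode.
 */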
void
_vk_device_report_lost(struct vk_device *device)
{
   assert(p_atomic_read(&device->_lost.lost) > 0);

   device->_lost.reported = true;

   vk_foreach_queue(queue, device) {
      if (queue->_lost.lost) {
         __vk_errorf(queue, VK_ERROR_DEVICE_LOST,
                     queue->_lost.error_file, queue->_lost.error_line,
                     "%s", queue->_lost.error_msg);
      }
   }

   vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
           timeline_mode_str(device));
}

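/* Mark the device lost and log the reason.  Always returns
 * VK_ERROR_DEVICE_LOST so callers can hand the result straight back;
 * setting MESA_VK_ABORT_ON_DEVICE_LOSS turns device loss into an abort().
 */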
VkResult
_vk_device_set_lost(struct vk_device *device,
                    const char *file, int line,
                    const char *msg, ...)
{
   /* This flushes out any per-queue device lost messages */
   if (vk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   p_atomic_inc(&device->_lost.lost);
   device->_lost.reported = true;

   va_list ap;
   va_start(ap, msg);
   __vk_errorv(device, VK_ERROR_DEVICE_LOST, file, line, msg, ap);
   va_end(ap);

   vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
           timeline_mode_str(device));

   if (debug_get_bool_option("MESA_VK_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}

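/* Device-level entrypoint lookup.  Only returns entrypoints supported by
 * the instance's API version and the enabled instance and device
 * extensions.
 */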
PFN_vkVoidFunction
vk_device_get_proc_addr(const struct vk_device *device,
                        const char *name)
{
   if (device == NULL || name == NULL)
      return NULL;

   struct vk_instance *instance = device->physical->instance;
   return vk_device_dispatch_table_get_if_supported(&device->dispatch_table,
                                                    name,
                                                    instance->app_info.api_version,
                                                    &instance->enabled_extensions,
                                                    &device->enabled_extensions);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_common_GetDeviceProcAddr(VkDevice _device,
                            const char *pName)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   return vk_device_get_proc_addr(device, pName);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceQueue(VkDevice _device,
                         uint32_t queueFamilyIndex,
                         uint32_t queueIndex,
                         VkQueue *pQueue)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   const VkDeviceQueueInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .pNext = NULL,
      /* flags = 0 because (Vulkan spec 1.2.170 - vkGetDeviceQueue):
       *
       *    "vkGetDeviceQueue must only be used to get queues that were
       *     created with the flags parameter of VkDeviceQueueCreateInfo set
       *     to zero. To get queues that were created with a non-zero flags
       *     parameter use vkGetDeviceQueue2."
       */
      .flags = 0,
      .queueFamilyIndex = queueFamilyIndex,
      .queueIndex = queueIndex,
   };

   device->dispatch_table.GetDeviceQueue2(_device, &info, pQueue);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceQueue2(VkDevice _device,
                          const VkDeviceQueueInfo2 *pQueueInfo,
                          VkQueue *pQueue)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   struct vk_queue *queue = NULL;
   vk_foreach_queue(iter, device) {
      if (iter->queue_family_index == pQueueInfo->queueFamilyIndex &&
          iter->index_in_family == pQueueInfo->queueIndex) {
         queue = iter;
         break;
      }
   }

   /* From the Vulkan 1.1.70 spec:
    *
    *    "The queue returned by vkGetDeviceQueue2 must have the same flags
    *    value from this structure as that used at device creation time in a
    *    VkDeviceQueueCreateInfo instance. If no matching flags were specified
    *    at device creation time then pQueue will return VK_NULL_HANDLE."
    */
   if (queue && queue->flags == pQueueInfo->flags)
      *pQueue = vk_queue_to_handle(queue);
   else
      *pQueue = VK_NULL_HANDLE;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_MapMemory(VkDevice _device,
                    VkDeviceMemory memory,
                    VkDeviceSize offset,
                    VkDeviceSize size,
                    VkMemoryMapFlags flags,
                    void **ppData)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   const VkMemoryMapInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
      .flags = flags,
      .memory = memory,
      .offset = offset,
      .size = size,
   };

   return device->dispatch_table.MapMemory2KHR(_device, &info, ppData);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_UnmapMemory(VkDevice _device,
                      VkDeviceMemory memory)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   ASSERTED VkResult result;

   const VkMemoryUnmapInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
      .memory = memory,
   };

   result = device->dispatch_table.UnmapMemory2KHR(_device, &info);
   assert(result == VK_SUCCESS);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetImageMemoryRequirements(VkDevice _device,
                                     VkImage image,
                                     VkMemoryRequirements *pMemoryRequirements)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkImageMemoryRequirementsInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
      .image = image,
   };
   VkMemoryRequirements2 reqs = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
   };
   device->dispatch_table.GetImageMemoryRequirements2(_device, &info, &reqs);

   *pMemoryRequirements = reqs.memoryRequirements;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_BindImageMemory(VkDevice _device,
                          VkImage image,
                          VkDeviceMemory memory,
                          VkDeviceSize memoryOffset)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkBindImageMemoryInfo bind = {
      .sType         = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image         = image,
      .memory        = memory,
      .memoryOffset  = memoryOffset,
   };

   return device->dispatch_table.BindImageMemory2(_device, 1, &bind);
}

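/* Common vkGetImageSparseMemoryRequirements implemented on top of the *2
 * variant: forwards the query and copies the results back into the
 * caller's VkSparseImageMemoryRequirements array.
 */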
VKAPI_ATTR void VKAPI_CALL
vk_common_GetImageSparseMemoryRequirements(VkDevice _device,
                                           VkImage image,
                                           uint32_t *pSparseMemoryRequirementCount,
                                           VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkImageSparseMemoryRequirementsInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
      .image = image,
   };

   if (!pSparseMemoryRequirements) {
      device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
                                                               &info,
                                                               pSparseMemoryRequirementCount,
                                                               NULL);
      return;
   }

   STACK_ARRAY(VkSparseImageMemoryRequirements2, mem_reqs2, *pSparseMemoryRequirementCount);

   for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i) {
      mem_reqs2[i].sType = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2;
      mem_reqs2[i].pNext = NULL;
   }

   device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
                                                            &info,
                                                            pSparseMemoryRequirementCount,
                                                            mem_reqs2);

   for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i)
      pSparseMemoryRequirements[i] = mem_reqs2[i].memoryRequirements;

   STACK_ARRAY_FINISH(mem_reqs2);
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_DeviceWaitIdle(VkDevice _device)
{
   MESA_TRACE_FUNC();

   VK_FROM_HANDLE(vk_device, device, _device);
   const struct vk_device_dispatch_table *disp = &device->dispatch_table;

   vk_foreach_queue(queue, device) {
      VkResult result = disp->QueueWaitIdle(vk_queue_to_handle(queue));
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

#ifndef _WIN32

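/* Sample the given clock and return the time in nanoseconds, falling back
 * from CLOCK_MONOTONIC_RAW to CLOCK_MONOTONIC when needed.  Returns 0 on
 * failure.
 */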
uint64_t
vk_clock_gettime(clockid_t clock_id)
{
   struct timespec current;
   int ret;

   ret = clock_gettime(clock_id, &current);
#ifdef CLOCK_MONOTONIC_RAW
   if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
      ret = clock_gettime(CLOCK_MONOTONIC, &current);
#endif
   if (ret < 0)
      return 0;

   return (uint64_t)current.tv_sec * 1000000000ULL + current.tv_nsec;
}

#endif //!_WIN32