/* xref: /aosp_15_r20/external/mesa3d/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c (revision 6104692788411f58d303aa86923a9ff6ecaded22) */
/*
 * Copyright © 2009 Corbin Simpson <[email protected]>
 * Copyright © 2009 Joakim Sindholt <[email protected]>
 * Copyright © 2011 Marek Olšák <[email protected]>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "amdgpu_cs.h"

#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/thread_sched.h"
#include "util/xmlconfig.h"
#include "drm-uapi/amdgpu_drm.h"
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "sid.h"

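/* All screens created for the same GPU share one amdgpu_winsys: dev_tab maps
 * the amdgpu_device_handle (which libdrm_amdgpu already dedups per device) to
 * its winsys. The table and winsys teardown are both guarded by dev_tab_mutex.
 */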
static struct hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = SIMPLE_MTX_INITIALIZER;

#if MESA_DEBUG
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
#endif

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *aws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, aws->dev, &aws->info, false))
      goto fail;

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (aws->info.has_dedicated_vram)
      aws->info.has_local_buffers = false;

   aws->addrlib = ac_addrlib_create(&aws->info, &aws->info.max_alignment);
   if (!aws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   aws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                   strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
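   /* Don't submit real IBs when the GPU family is overridden (packets built
    * for the overridden chip may not match the real one) or when RADEON_NOOP
    * is set; a reading of the intent, see how noop_cs is consumed in
    * amdgpu_cs.c. */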
   aws->noop_cs = aws->info.family_overridden || debug_get_bool_option("RADEON_NOOP", false);
#if MESA_DEBUG
   aws->debug_all_bos = debug_get_option_all_bos();
#endif
   aws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "sqtt") != NULL;
   aws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                               driQueryOptionb(config->options, "radeonsi_zerovram");

   return true;

fail:
   amdgpu_device_deinitialize(aws->dev);
   aws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *aws)
{
   if (aws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(aws->dev, 0);

   for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(aws->queues[i].fences); j++)
         amdgpu_fence_reference(&aws->queues[i].fences[j], NULL);

      amdgpu_ctx_reference(&aws->queues[i].last_ctx, NULL);
   }

   if (util_queue_is_initialized(&aws->cs_queue))
      util_queue_destroy(&aws->cs_queue);

   simple_mtx_destroy(&aws->bo_fence_lock);
   if (aws->bo_slabs.groups)
      pb_slabs_deinit(&aws->bo_slabs);
   pb_cache_deinit(&aws->bo_cache);
   _mesa_hash_table_destroy(aws->bo_export_table, NULL);
   simple_mtx_destroy(&aws->sws_list_lock);
#if MESA_DEBUG
   simple_mtx_destroy(&aws->global_bo_list_lock);
#endif
   simple_mtx_destroy(&aws->bo_export_table_lock);

   ac_addrlib_destroy(aws->addrlib);
   amdgpu_device_deinitialize(aws->dev);
   FREE(aws);
}

static void amdgpu_winsys_destroy_locked(struct radeon_winsys *rws, bool locked)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   if (!locked)
      simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&aws->reference, NULL);
   if (destroy && dev_tab) {
      _mesa_hash_table_remove_key(dev_tab, aws->dev);
      if (_mesa_hash_table_num_entries(dev_tab) == 0) {
         _mesa_hash_table_destroy(dev_tab, NULL);
         dev_tab = NULL;
      }
   }

   if (!locked)
      simple_mtx_unlock(&dev_tab_mutex);

   if (destroy)
      do_winsys_deinit(aws);

   close(sws->fd);
   FREE(rws);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   amdgpu_winsys_destroy_locked(rws, false);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws, struct radeon_info *info)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   *info = aws->info;
}

static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
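   /* Nothing to request: feature requests (e.g. Hyper-Z access on the legacy
    * radeon kernel driver) have no amdgpu equivalent, so this is a stub. */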
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

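   /* Allocation and IB statistics are counters kept by the winsys itself;
    * the remaining values are queried from the kernel on demand. */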
   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return aws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return aws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return aws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return aws->mapped_gtt;
   case RADEON_SLAB_WASTED_VRAM:
      return aws->slab_wasted_vram;
   case RADEON_SLAB_WASTED_GTT:
      return aws->slab_wasted_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return aws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return aws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(aws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return aws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return aws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return aws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return aws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(aws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(aws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(aws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&aws->cs_queue, 0);
   }
   return 0;
}

static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

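   /* reg_offset is a byte offset, while the kernel interface takes dword
    * offsets, hence the division by 4. 0xffffffff appears to select the
    * default/broadcast register instance. */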
   return amdgpu_read_mm_registers(aws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool ret;

   simple_mtx_lock(&aws->sws_list_lock);

   ret = pipe_reference(&sws->reference, NULL);
   if (ret) {
      struct amdgpu_screen_winsys **sws_iter;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list, so that
       * amdgpu_winsys_create can't re-use it anymore
       */
      for (sws_iter = &aws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
   }

   simple_mtx_unlock(&aws->sws_list_lock);

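   /* This was the last reference on this fd: close the GEM handles that were
    * opened on it for buffer sharing before destroying the table. */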
   if (ret && sws->kms_handles) {
      struct drm_gem_close args;

      hash_table_foreach(sws->kms_handles, entry) {
         args.handle = (uintptr_t)entry->data;
         drmIoctl(sws->fd, DRM_IOCTL_GEM_CLOSE, &args);
      }
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   }

   return ret;
}

static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cpu)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   util_thread_sched_apply_policy(aws->cs_queue.threads[0],
                                  UTIL_THREAD_DRIVER_SUBMIT, cpu, NULL);
}

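/* sws->kms_handles maps amdgpu_bo_real pointers to the GEM handles opened on
 * this screen's own DRM fd (used when that fd differs from the device fd).
 * Hash by the BO's KMS handle, compare by pointer identity. */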
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_bo_real *bo = key;

   return bo->kms_handle;
}

static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}

static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   return cs->csc->secure;
}

static uint32_t
radeon_to_amdgpu_pstate(enum radeon_ctx_pstate pstate)
{
   switch (pstate) {
   case RADEON_CTX_PSTATE_NONE:
      return AMDGPU_CTX_STABLE_PSTATE_NONE;
   case RADEON_CTX_PSTATE_STANDARD:
      return AMDGPU_CTX_STABLE_PSTATE_STANDARD;
   case RADEON_CTX_PSTATE_MIN_SCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
   case RADEON_CTX_PSTATE_MIN_MCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
   case RADEON_CTX_PSTATE_PEAK:
      return AMDGPU_CTX_STABLE_PSTATE_PEAK;
   default:
      unreachable("Invalid pstate");
   }
}

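/* Stable pstates pin the GPU clocks to a fixed level so that measurements
 * (e.g. SQTT/RGP profiling) are reproducible. */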
static bool
amdgpu_cs_set_pstate(struct radeon_cmdbuf *rcs, enum radeon_ctx_pstate pstate)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   if (!cs->aws->info.has_stable_pstate)
      return false;

   uint32_t amdgpu_pstate = radeon_to_amdgpu_pstate(pstate);
   return amdgpu_cs_ctx_stable_pstate(cs->ctx->ctx,
      AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL) == 0;
}

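/* os_same_file_description() returns 0 if both fds point at the same open
 * file description (e.g. one is a dup of the other), a positive value if
 * they don't, and a negative value if the kernel can't tell; in that last
 * case, warn once and assume they differ. */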
static bool
are_file_descriptions_equal(int fd1, int fd2)
{
   int r = os_same_file_description(fd1, fd2);

   if (r == 0)
      return true;

   if (r < 0) {
      static bool logged;

      if (!logged) {
         os_log_message("amdgpu: os_same_file_description couldn't "
                        "determine if two DRM fds reference the same "
                        "file description.\n"
                        "If they do, bad things may happen!\n");
         logged = true;
      }
   }
   return false;
}

static int
amdgpu_drm_winsys_get_fd(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);

   return sws->fd;
}

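/* Entry point for the amdgpu winsys: dups the fd, dedups the device through
 * dev_tab (one amdgpu_winsys per GPU, one amdgpu_screen_winsys per distinct
 * fd), and creates the driver screen last, all under dev_tab_mutex.
 * A sketch of how a gallium target would call it (radeonsi as an example):
 *
 *    struct radeon_winsys *rws =
 *       amdgpu_winsys_create(fd, config, radeonsi_screen_create);
 */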
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_screen_winsys *sws;
   struct amdgpu_winsys *aws;
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor;
   int r;

   sws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!sws)
      return NULL;

   pipe_reference_init(&sws->reference, 1);
   sws->fd = os_dupfd_cloexec(fd);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create_ptr_keys();

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(sws->fd, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      goto fail;
   }

   /* Lookup a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      struct amdgpu_screen_winsys *sws_iter;

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      amdgpu_device_deinitialize(dev);

      simple_mtx_lock(&aws->sws_list_lock);
      for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
         if (are_file_descriptions_equal(sws_iter->fd, sws->fd)) {
            close(sws->fd);
            FREE(sws);
            sws = sws_iter;
            pipe_reference(NULL, &sws->reference);
            simple_mtx_unlock(&aws->sws_list_lock);
            goto unlock;
         }
      }
      simple_mtx_unlock(&aws->sws_list_lock);

      sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                 kms_handle_equals);
      if (!sws->kms_handles)
         goto fail;

      pipe_reference(NULL, &aws->reference);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;
      /* The device fd might be different from the one we passed because of
       * libdrm_amdgpu device dedup logic. This can happen if radv is initialized
       * first.
       * Get the correct fd or the buffer sharing will not work (see #3424).
       */
      int device_fd = amdgpu_device_get_fd(dev);
      if (!are_file_descriptions_equal(device_fd, fd)) {
         sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                    kms_handle_equals);
         if (!sws->kms_handles)
            goto fail;
         /* We could avoid storing the fd and use amdgpu_device_get_fd() where
          * we need it but we'd have to use os_same_file_description() to
          * compare the fds.
          */
         aws->fd = device_fd;
      } else {
         aws->fd = sws->fd;
      }
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      /* Only aws and buffer functions are used. */
      aws->dummy_sws.aws = aws;
      amdgpu_bo_init_functions(&aws->dummy_sws);

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
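      /* BO cache: cached buffers expire after 0.5 s (500000 us) and total
       * cached memory is capped at 1/8 of VRAM+GTT. With check_vm the reuse
       * size factor drops from 1.5 to 1.0, i.e. a cached buffer is only
       * reused for a request of matching size. */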
      pb_cache_init(&aws->bo_cache, RADEON_NUM_HEAPS,
                    500000, aws->check_vm ? 1.0f : 1.5f, 0,
                    ((uint64_t)aws->info.vram_size_kb + aws->info.gart_size_kb) * 1024 / 8,
                    offsetof(struct amdgpu_bo_real_reusable, cache_entry), aws,
                    /* Cast to void* because one of the function parameters
                     * is a struct pointer instead of void*. */
                    (void*)amdgpu_bo_destroy, (void*)amdgpu_bo_can_reclaim);

      if (!pb_slabs_init(&aws->bo_slabs,
                         8,  /* min slab entry size: 256 bytes */
                         20, /* max slab entry size: 1 MB (slab size = 2 MB) */
                         RADEON_NUM_HEAPS, true,
                         aws,
                         amdgpu_bo_can_reclaim_slab,
                         amdgpu_bo_slab_alloc,
                         /* Cast to void* because one of the function parameters
                          * is a struct pointer instead of void*. */
                         (void*)amdgpu_bo_slab_free)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs.min_order;

      /* init reference */
      pipe_reference_init(&aws->reference, 1);
#if MESA_DEBUG
      list_inithead(&aws->global_bo_list);
#endif
      aws->bo_export_table = util_hash_table_create_ptr_keys();

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
#if MESA_DEBUG
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
#endif
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

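      /* One background thread with up to 8 queued jobs handles asynchronous
       * CS submission; the queue grows instead of blocking when full. */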
      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      _mesa_hash_table_insert(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = amdgpu_vm_reserve_vmid(dev, 0);
         if (r) {
            amdgpu_winsys_destroy_locked(&sws->base, true);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   sws->aws = aws;

   /* Set functions. */
   sws->base.unref = amdgpu_winsys_unref;
   sws->base.destroy = amdgpu_winsys_destroy;
   sws->base.get_fd = amdgpu_drm_winsys_get_fd;
   sws->base.query_info = amdgpu_winsys_query_info;
   sws->base.cs_request_feature = amdgpu_cs_request_feature;
   sws->base.query_value = amdgpu_query_value;
   sws->base.read_registers = amdgpu_read_registers;
   sws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
   sws->base.cs_is_secure = amdgpu_cs_is_secure;
   sws->base.cs_set_pstate = amdgpu_cs_set_pstate;

   amdgpu_bo_init_functions(sws);
   amdgpu_cs_init_functions(sws);
   amdgpu_surface_init_functions(sws);

   simple_mtx_lock(&aws->sws_list_lock);
   sws->next = aws->sws_list;
   aws->sws_list = sws;
   simple_mtx_unlock(&aws->sws_list_lock);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   sws->base.screen = screen_create(&sws->base, config);
   if (!sws->base.screen) {
      amdgpu_winsys_destroy_locked(&sws->base, true);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

unlock:
   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &sws->base;

fail_alloc:
   FREE(aws);
fail:
   if (sws->kms_handles)
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   close(sws->fd);
   FREE(sws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}