/*
 * Copyright © 2012-2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "util/os_file.h"
#include "util/u_process.h"

#include "freedreno_rd_output.h"

#include "freedreno_drmif.h"
#include "freedreno_drm_perfetto.h"
#include "freedreno_priv.h"

struct fd_device *msm_device_new(int fd, drmVersionPtr version);
#ifdef HAVE_FREEDRENO_VIRTIO
struct fd_device *virtio_device_new(int fd, drmVersionPtr version);
#endif

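/* System page size, queried via os_get_page_size() in fd_device_new()
 * (defaults to 4096):
 */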
uint64_t os_page_size = 4096;

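/* Create a new fd_device for the given drm fd, probing the kernel driver
 * (msm, virtio_gpu, or kgsl) to pick the matching backend.  Returns NULL
 * for unsupported devices.  The caller keeps ownership of the fd; use
 * fd_device_new_dup() if the device should own (and close) its own fd.
 */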
struct fd_device *
fd_device_new(int fd)
{
   struct fd_device *dev = NULL;
   drmVersionPtr version;
   bool use_heap = false;

   os_get_page_size(&os_page_size);

   /* figure out if we are kgsl or msm drm driver: */
   version = drmGetVersion(fd);
   if (!version) {
      ERROR_MSG("cannot get version: %s", strerror(errno));
      return NULL;
   }

   if (!strcmp(version->name, "msm")) {
      DEBUG_MSG("msm DRM device");
      if (version->version_major != 1) {
         ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
                   version->version_minor, version->version_patchlevel);
         goto out;
      }

      dev = msm_device_new(fd, version);
#ifdef HAVE_FREEDRENO_VIRTIO
   } else if (!strcmp(version->name, "virtio_gpu")) {
      DEBUG_MSG("virtio_gpu DRM device");
      dev = virtio_device_new(fd, version);
      /* Only devices that support a hypervisor are a6xx+, so avoid the
       * extra guest<->host round trips associated with pipe creation:
       */
      use_heap = true;
#endif
#if HAVE_FREEDRENO_KGSL
   } else if (!strcmp(version->name, "kgsl")) {
      DEBUG_MSG("kgsl DRM device");
      dev = kgsl_device_new(fd);
#endif
   }

   if (!dev) {
      INFO_MSG("unsupported device: %s", version->name);
      goto out;
   }

out:
   drmFreeVersion(version);

   if (!dev)
      return NULL;

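   /* Common initialization, independent of which backend created the
    * device:
    */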
   fd_drm_perfetto_init();

   fd_rd_dump_env_init();
   fd_rd_output_init(&dev->rd, util_get_process_name());

   p_atomic_set(&dev->refcnt, 1);
   dev->fd = fd;
   dev->handle_table =
      _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
   dev->name_table =
      _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
   fd_bo_cache_init(&dev->bo_cache, false, "bo");
   fd_bo_cache_init(&dev->ring_cache, true, "ring");

   list_inithead(&dev->deferred_submits);
   simple_mtx_init(&dev->submit_lock, mtx_plain);
   simple_mtx_init(&dev->suballoc_lock, mtx_plain);

   if (!use_heap) {
      struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);

      if (!pipe)
         goto fail;

      /* Userspace fences don't appear to be reliable enough (missing some
       * cache flushes?) on older gens, so limit sub-alloc heaps to a6xx+
       * for now:
       */
      use_heap = fd_dev_gen(&pipe->dev_id) >= 6;

      fd_pipe_del(pipe);
   }

   if (use_heap) {
      dev->ring_heap = fd_bo_heap_new(dev, RING_FLAGS);
      dev->default_heap = fd_bo_heap_new(dev, 0);
   }

   return dev;

fail:
   fd_device_del(dev);
   return NULL;
}

/* Like fd_device_new() but creates its own private dup() of the fd
 * which is close()d when the device is finalized.
 */
struct fd_device *
fd_device_new_dup(int fd)
{
   int dup_fd = os_dupfd_cloexec(fd);
   struct fd_device *dev = fd_device_new(dup_fd);
   if (dev)
      dev->closefd = 1;
   else
      close(dup_fd);
   return dev;
}

/* Convenience helper to open the drm device and return a new fd_device:
 */
struct fd_device *
fd_device_open(void)
{
   int fd;

   fd = drmOpenWithType("msm", NULL, DRM_NODE_RENDER);
#ifdef HAVE_FREEDRENO_VIRTIO
   if (fd < 0)
      fd = drmOpenWithType("virtio_gpu", NULL, DRM_NODE_RENDER);
#endif
   if (fd < 0)
      return NULL;

   return fd_device_new(fd);
}

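/* Take a reference on the device: */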
struct fd_device *
fd_device_ref(struct fd_device *dev)
{
   ref(&dev->refcnt);
   return dev;
}

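/* Evict cached buffers from the device's BO and ringbuffer caches: */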
void
fd_device_purge(struct fd_device *dev)
{
   fd_bo_cache_cleanup(&dev->bo_cache, 0);
   fd_bo_cache_cleanup(&dev->ring_cache, 0);
}

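/* Drop a reference, tearing down the device (caches, heaps, hash tables,
 * and backend state) once the last reference is released.  The underlying
 * fd is closed only if the device owns it (see fd_device_new_dup()):
 */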
void
fd_device_del(struct fd_device *dev)
{
   if (!unref(&dev->refcnt))
      return;

   fd_rd_output_fini(&dev->rd);

   assert(list_is_empty(&dev->deferred_submits));
   assert(!dev->deferred_submits_fence);

   if (dev->suballoc_bo)
      fd_bo_del(dev->suballoc_bo);

   if (dev->ring_heap)
      fd_bo_heap_destroy(dev->ring_heap);

   if (dev->default_heap)
      fd_bo_heap_destroy(dev->default_heap);

   fd_bo_cache_cleanup(&dev->bo_cache, 0);
   fd_bo_cache_cleanup(&dev->ring_cache, 0);

   /* Needs to be after bo cache cleanup in case backend has a
    * util_vma_heap that it destroys:
    */
   dev->funcs->destroy(dev);

   _mesa_hash_table_destroy(dev->handle_table, NULL);
   _mesa_hash_table_destroy(dev->name_table, NULL);

   if (fd_device_threaded_submit(dev))
      util_queue_destroy(&dev->submit_queue);

   if (dev->closefd)
      close(dev->fd);

   free(dev);
}

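/* Return the drm fd backing this device: */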
int
fd_device_fd(struct fd_device *dev)
{
   return dev->fd;
}

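/* Return the device's kernel interface version, used to gate optional
 * features (see for example fd_has_syncobj()):
 */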
enum fd_version
fd_device_version(struct fd_device *dev)
{
   return dev->version;
}

DEBUG_GET_ONCE_BOOL_OPTION(libgl, "LIBGL_DEBUG", false)

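/* Returns the value of the LIBGL_DEBUG debug option (evaluated once,
 * defaults to false):
 */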
bool
fd_dbg(void)
{
   return debug_get_option_libgl();
}

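/* Check whether drm syncobjs can be used: requires both the DRM_CAP_SYNCOBJ
 * capability and a kernel interface new enough to support fence fds:
 */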
bool
fd_has_syncobj(struct fd_device *dev)
{
   uint64_t value;
   if (drmGetCap(dev->fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value && dev->version >= FD_VERSION_FENCE_FD;
}