/*
 * Copyright © 2022 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#ifndef VIRTIO_PRIV_H_
#define VIRTIO_PRIV_H_

#include <poll.h>

#include "freedreno_priv.h"

#include "util/perf/cpu_trace.h"
#include "util/u_atomic.h"
#include "util/slab.h"
#include "util/timespec.h"
#include "util/vma.h"

#include "drm-uapi/virtgpu_drm.h"
/* We also use some types/defines from the host drm/msm uabi: */
#include "drm-uapi/msm_drm.h"

#include "virglrenderer_hw.h"
#include "msm_proto.h"

#include "vdrm.h"

struct virtio_device {
   struct fd_device base;

   struct vdrm_device *vdrm;

   uint32_t next_blob_id;
   struct msm_shmem *shmem;

   /*
    * Notes on address space allocation:
    *
    * In both the import (GEM_INFO) and new (GEM_NEW) paths we allocate
    * the iova.  The iova (vma on the kernel side) is local to the
    * address space, which is 1:1 with the drm fd (and therefore 1:1
    * with virtio_device and address_space) and is not shared with
    * anything outside of the driver.  Combined with handle
    * de-duplication, this lets us safely assume that an iova has not
    * yet been set on imported buffers.
    *
    * The other complication with userspace-allocated iova is that the
    * kernel holds a reference to the bo (and the GPU is still using
    * its iova) until the submit retires.  So a per-pipe retire_queue
    * is used to hold an extra reference to the submit (and indirectly
    * all the bo's it references) until the out-fence is signaled.
    * (See the initialization sketch below virtio_device_new().)
    */
   struct util_vma_heap address_space;
   simple_mtx_t address_space_lock;
};
FD_DEFINE_CAST(fd_device, virtio_device);

struct fd_device *virtio_device_new(int fd, drmVersionPtr version);

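/*
 * Illustrative sketch (not part of this header, and an assumption about
 * the implementation): virtio_device_new() is expected to initialize the
 * userspace VA allocator roughly as below.  The va_start/va_size values
 * (reported by the host) are hypothetical placeholders.
 *
 *    simple_mtx_init(&virtio_dev->address_space_lock, mtx_plain);
 *    util_vma_heap_init(&virtio_dev->address_space,
 *                       va_start,   // hypothetical: start of GPU VA range
 *                       va_size);   // hypothetical: size of GPU VA range
 */
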
static inline void
virtio_dev_free_iova(struct fd_device *dev, uint64_t iova, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   simple_mtx_lock(&virtio_dev->address_space_lock);
   util_vma_heap_free(&virtio_dev->address_space, iova, size);
   simple_mtx_unlock(&virtio_dev->address_space_lock);
}

static inline uint64_t
virtio_dev_alloc_iova(struct fd_device *dev, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   uint64_t iova;

   simple_mtx_lock(&virtio_dev->address_space_lock);
   iova = util_vma_heap_alloc(&virtio_dev->address_space, size, os_page_size);
   simple_mtx_unlock(&virtio_dev->address_space_lock);

   return iova;
}
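
/*
 * Example usage (illustrative sketch only): callers are expected to pair
 * these helpers, freeing the iova with the same size that was allocated,
 * and only once no pending submit can still reference it (hence the
 * retire_queue below).  A zero return from virtio_dev_alloc_iova()
 * indicates the VA heap is exhausted.
 *
 *    uint64_t iova = virtio_dev_alloc_iova(dev, bo->size);
 *    if (!iova)
 *       return NULL;   // hypothetical error path
 *    ...
 *    virtio_dev_free_iova(dev, iova, bo->size);
 */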

struct virtio_pipe {
   struct fd_pipe base;
   uint32_t pipe;
   uint32_t gpu_id;
   uint64_t chip_id;
   uint64_t gmem_base;
   uint32_t gmem;
   uint32_t queue_id;
   uint32_t ring_idx;
   struct slab_parent_pool ring_pool;

   /**
    * We know that the kernel allocates fence seqnos sequentially per-
    * submitqueue, in the range 1..INT_MAX, and that the counter is only
    * incremented *after* any point where the submit ioctl could be
    * restarted.  So we just *guess* what the next fence seqno will be,
    * to avoid having to synchronize the submit with the host.  (See the
    * sketch below this struct.)
    *
    * TODO maybe we need a version_minor bump so we can rely on the
    * 1..INT_MAX assumption.. it is only really true after:
    *
    *   ca3ffcbeb0c8 ("drm/msm/gpu: Don't allow zero fence_id")
    */
   int32_t next_submit_fence;

   /**
    * When userspace allocates the iova, we need to defer deleting bo's
    * (and therefore releasing their address) until submits referencing
    * them have completed.  This is accomplished by enqueueing a job that
    * holds a reference to the submit and waits on the submit's out-fence
    * before dropping that reference.  The submit holds a reference to
    * the associated ring buffers, which in turn hold a ref to the
    * associated bo's.
    */
   struct util_queue retire_queue;
};
FD_DEFINE_CAST(fd_pipe, virtio_pipe);
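
/*
 * Illustrative sketch only (the helper names below are hypothetical, not
 * part of this header): the fence-seqno guess described above amounts to
 * atomically advancing next_submit_fence when a submit is flushed, and
 * the deferred bo cleanup amounts to queueing a job on retire_queue that
 * waits for the submit's out-fence before dropping the submit reference:
 *
 *    // at flush time, predict the kernel fence seqno for this submit:
 *    uint32_t kfence = p_atomic_inc_return(&virtio_pipe->next_submit_fence);
 *
 *    // defer cleanup until the submit retires (hypothetical callbacks:
 *    //   retire_execute() waits on the submit's out-fence,
 *    //   retire_cleanup() drops the reference to the submit):
 *    util_queue_add_job(&virtio_pipe->retire_queue, submit,
 *                       &submit_queue_fence,  // hypothetical util_queue_fence
 *                       retire_execute, retire_cleanup, 0);
 */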

struct fd_pipe *virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id,
                                uint32_t prio);

struct fd_submit *virtio_submit_new(struct fd_pipe *pipe);

struct virtio_bo {
   struct fd_bo base;
   uint64_t alloc_time_ns;
   uint64_t offset;
   uint32_t res_id;
   uint32_t blob_id;
   uint32_t upload_seqno;
   bool has_upload_seqno;
};
FD_DEFINE_CAST(fd_bo, virtio_bo);

struct fd_bo *virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
                                    uint32_t handle);

/*
 * Internal helpers:
 */
int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);
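
/*
 * Example (illustrative only, exact semantics live in the .c file): this
 * is assumed to synchronously forward a host msm ioctl over virtgpu, e.g.:
 *
 *    struct drm_msm_submitqueue_close req = { .id = queue_id };
 *    virtio_simple_ioctl(dev, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &req);
 */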

#endif /* VIRTIO_PRIV_H_ */