/*
 * Copyright © 2012-2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

/**
 * A priority of zero is the highest priority; higher numeric values are
 * lower priorities.
 */
struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
   struct fd_pipe *pipe;
   uint64_t val;

   if (id > FD_PIPE_MAX) {
      ERROR_MSG("invalid pipe id: %d", id);
      return NULL;
   }

   if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
      ERROR_MSG("invalid priority!");
      return NULL;
   }

   pipe = dev->funcs->pipe_new(dev, id, prio);
   if (!pipe) {
      ERROR_MSG("allocation failed");
      return NULL;
   }

   pipe->dev = dev;
   pipe->id = id;
   p_atomic_set(&pipe->refcnt, 1);

   fd_pipe_get_param(pipe, FD_GPU_ID, &val);
   pipe->dev_id.gpu_id = val;

   fd_pipe_get_param(pipe, FD_CHIP_ID, &val);
   pipe->dev_id.chip_id = val;

   pipe->is_64bit = fd_dev_64b(&pipe->dev_id);

   /* Use the _NOSYNC flags because we don't want the control_mem bo to hold
    * a reference to the pipe itself.  This also means that we won't be able
    * to determine if the buffer is idle, which is needed by the bo-cache.  But
    * pipe creation/destroy is not a high frequency event.
    */
   pipe->control_mem = fd_bo_new(dev, sizeof(*pipe->control),
                                 FD_BO_CACHED_COHERENT | _FD_BO_NOSYNC,
                                 "pipe-control");
   pipe->control = fd_bo_map(pipe->control_mem);

   /* We could be getting a bo from the bo-cache, make sure the fence value
    * is not garbage:
    */
   pipe->control->fence = 0;
   pipe->control_mem->bo_reuse = NO_CACHE;

   return pipe;
}

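/**
 * Convenience wrapper around fd_pipe_new2() that uses the default
 * priority (1).
 */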
struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
{
   return fd_pipe_new2(dev, id, 1);
}

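/**
 * Take a reference on the pipe, acquiring fence_lock internally.
 */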
struct fd_pipe *
fd_pipe_ref(struct fd_pipe *pipe)
{
   simple_mtx_lock(&fence_lock);
   fd_pipe_ref_locked(pipe);
   simple_mtx_unlock(&fence_lock);
   return pipe;
}

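/**
 * Like fd_pipe_ref(), but for callers that already hold fence_lock.
 */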
struct fd_pipe *
fd_pipe_ref_locked(struct fd_pipe *pipe)
{
   simple_mtx_assert_locked(&fence_lock);
   pipe->refcnt++;
   return pipe;
}

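/**
 * Drop a reference on the pipe, acquiring fence_lock internally.  The pipe
 * is destroyed when the last reference is dropped.
 */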
void
fd_pipe_del(struct fd_pipe *pipe)
{
   simple_mtx_lock(&fence_lock);
   fd_pipe_del_locked(pipe);
   simple_mtx_unlock(&fence_lock);
}

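/**
 * Like fd_pipe_del(), but for callers that already hold fence_lock.
 */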
void
fd_pipe_del_locked(struct fd_pipe *pipe)
{
   simple_mtx_assert_locked(&fence_lock);
   if (--pipe->refcnt)
      return;

   fd_bo_del(pipe->control_mem);
   pipe->funcs->destroy(pipe);
}

/**
 * Flush any unflushed deferred submits.  This is called at context-
 * destroy to make sure we don't leak unflushed submits.
 */
void
fd_pipe_purge(struct fd_pipe *pipe)
{
   struct fd_device *dev = pipe->dev;
   struct fd_fence *unflushed_fence = NULL;

   simple_mtx_lock(&dev->submit_lock);

   /* We only queue up deferred submits for a single pipe at a time, so
    * if there is a deferred_submits_fence on the same pipe as us, we
    * know we have deferred_submits queued, which need to be flushed:
    */
   if (dev->deferred_submits_fence && dev->deferred_submits_fence->pipe == pipe) {
      unflushed_fence = fd_fence_ref(dev->deferred_submits_fence);
   }

   simple_mtx_unlock(&dev->submit_lock);

   if (unflushed_fence) {
      fd_fence_flush(unflushed_fence);
      fd_fence_del(unflushed_fence);
   }

   if (pipe->funcs->finish)
      pipe->funcs->finish(pipe);
}

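/**
 * Query a device/pipe parameter (see enum fd_param_id); the result is
 * returned in *value.
 */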
int
fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value)
{
   return pipe->funcs->get_param(pipe, param, value);
}

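/**
 * Set a per-pipe parameter (see enum fd_param_id).
 */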
int
fd_pipe_set_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t value)
{
   return pipe->funcs->set_param(pipe, param, value);
}

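/**
 * Return the device identity (gpu_id/chip_id) queried at pipe creation.
 */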
const struct fd_dev_id *
fd_pipe_dev_id(struct fd_pipe *pipe)
{
   return &pipe->dev_id;
}

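/**
 * Wait for the fence to pass, with the maximum possible timeout (see
 * fd_pipe_wait_timeout()).
 */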
int
fd_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence)
{
   return fd_pipe_wait_timeout(pipe, fence, ~0);
}

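/**
 * Wait for the fence to pass.  Returns immediately if the userspace fence
 * value in the control buffer shows the fence has already passed; otherwise
 * any deferred submits are flushed before waiting on the kernel.
 */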
int
fd_pipe_wait_timeout(struct fd_pipe *pipe, const struct fd_fence *fence,
                     uint64_t timeout)
{
   if (!fd_fence_after(fence->ufence, pipe->control->fence))
      return 0;

   if (!timeout)
      return -ETIMEDOUT;

   fd_pipe_flush(pipe, fence->ufence);

   return pipe->funcs->wait(pipe, fence, timeout);
}

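/**
 * Emit a CACHE_FLUSH_TS event which writes the next fence value back to the
 * pipe's control buffer, using the packet format for the GPU generation.
 * Returns the new fence value.
 */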
uint32_t
fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring)
{
   uint32_t fence = ++pipe->last_fence;
   unsigned gen = fd_dev_gen(&pipe->dev_id);

   if (gen >= A7XX) {
      OUT_PKT7(ring, CP_EVENT_WRITE7, 4);
      OUT_RING(ring, CP_EVENT_WRITE7_0_EVENT(CACHE_FLUSH_TS) |
               CP_EVENT_WRITE7_0_WRITE_SRC(EV_WRITE_USER_32B) |
               CP_EVENT_WRITE7_0_WRITE_DST(EV_DST_RAM) |
               CP_EVENT_WRITE7_0_WRITE_ENABLED);
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR_LO/HI */
      OUT_RING(ring, fence);
   } else if (gen >= A5XX) {
      OUT_PKT7(ring, CP_EVENT_WRITE, 4);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR_LO/HI */
      OUT_RING(ring, fence);
   } else {
      OUT_PKT3(ring, CP_EVENT_WRITE, 3);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR */
      OUT_RING(ring, fence);
   }

   return fence;
}

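/**
 * Allocate a new fence, taking a reference on the pipe.  If use_fence_fd is
 * set, the fence's fence_fd (if one is set) is closed when the last
 * reference is dropped.
 */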
struct fd_fence *
fd_fence_new(struct fd_pipe *pipe, bool use_fence_fd)
{
   struct fd_fence *f = calloc(1, sizeof(*f));

   f->refcnt = 1;
   f->pipe = fd_pipe_ref(pipe);
   util_queue_fence_init(&f->ready);
   f->use_fence_fd = use_fence_fd;
   f->fence_fd = -1;

   return f;
}

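/**
 * Take a reference on the fence, acquiring fence_lock internally.
 */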
struct fd_fence *
fd_fence_ref(struct fd_fence *f)
{
   simple_mtx_lock(&fence_lock);
   fd_fence_ref_locked(f);
   simple_mtx_unlock(&fence_lock);

   return f;
}

struct fd_fence *
fd_fence_ref_locked(struct fd_fence *f)
{
   simple_mtx_assert_locked(&fence_lock);
   f->refcnt++;
   return f;
}

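/**
 * Drop a reference on the fence.  When the last reference is dropped, the
 * pipe reference is released and any fence fd owned by the fence is closed.
 */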
void
fd_fence_del(struct fd_fence *f)
{
   simple_mtx_lock(&fence_lock);
   fd_fence_del_locked(f);
   simple_mtx_unlock(&fence_lock);
}

void
fd_fence_del_locked(struct fd_fence *f)
{
   simple_mtx_assert_locked(&fence_lock);

   if (--f->refcnt)
      return;

   fd_pipe_del_locked(f->pipe);

   if (f->use_fence_fd && (f->fence_fd != -1))
      close(f->fence_fd);

   free(f);
}

/**
 * Wait until the corresponding submit is flushed to the kernel.
 */
void
fd_fence_flush(struct fd_fence *f)
{
   MESA_TRACE_FUNC();
   /*
    * TODO we could simplify this to remove the flush_sync part of
    * fd_pipe_sp_flush() and just rely on the util_queue_fence_wait()
    */
   fd_pipe_flush(f->pipe, f->ufence);
   util_queue_fence_wait(&f->ready);
}

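/**
 * Wait (with the maximum possible timeout) for the fence to pass.
 */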
int
fd_fence_wait(struct fd_fence *f)
{
   MESA_TRACE_FUNC();
   return fd_pipe_wait(f->pipe, f);
}